Compare commits
365 commits
SHA1:

bb2eab21c6
cfc4677f62
978690e4f9
44eb640a1b
69ef0358c3
fc72ed9760
da6bb8c408
a889a17a63
6f581c1808
de3143c6b9
f547f2f3c0
0c4b6b1742
f088bcadd5
5770145433
d15fe0d782
98040b95a7
546005804c
a8184baf3b
e571db3846
a913b5ad7e
73a98393c6
acb8204a7f
313f105443
2b1ba3ea6b
f493b770a9
36430f7970
fb24b99a2b
0c65191c49
bf78bf3458
dcf06b3f5f
aba1d597bc
e37ac41afb
963e3ec65c
526203dd7f
f528e2ab96
3989be2f7b
d406a5fd22
394f6c14ad
77a01aaec7
df2427022a
9e4c508b55
cb358e8a19
2bc33b4c26
3768c71d9e
7b9e86f789
214e6363b3
5052c38846
d15be0c54d
27982c186e
9d990cbae8
3508cfb149
67633130c6
2e13f771f3
bbf3f33dc8
2dc7a1dc25
fc657692c7
e75fa6684c
abe6b3dc9b
297f224a92
f48f4dde24
cd8873dd3d
2dce69e001
5c4dc48995
10fa0d5321
356f483038
c0edbfd621
e46e43470b
d4e2341f93
e32bfd347c
65c7f3bac3
5f35b157a3
76e132ed56
6f7ee1c942
f476deac40
11973d0c0a
251610397c
83f90039ef
2fd846d40f
f9ab209417
bfca3185ee
7f45eb041c
b76a60dee6
4acfbaba1e
e749a31322
7370bbc034
38152f4d5b
21feb1808d
5e15ce3a4a
92b96ac2ed
e0b105623e
9d86e1d204
3a946f5291
cf1e138ab1
7175841ebd
f3e180b704
afdc9a804a
e24277883f
07e84005ac
39d3d3db56
4b79d9078a
1e0f2186a9
4404c36460
75634f9a1e
ad11d3f232
cbaf1808cb
03015fe6de
fa3804f8ba
4c1a3f096c
09a2f7a667
02e02e512f
24de1f7adc
c4685540e4
5aac513617
80dc5186ec
f9cb47a052
5202b5c781
28c34259c7
67ea873f61
f72c96c5c4
1bbb6f2454
c0be73f88d
727c4fdee3
b4c4be1f22
7106874e39
4bef6f5510
f056df579a
c062238ea4
20ff8a2380
ca99cab891
5829b244ec
3bc8fccc1b
4a96094bf5
00b44caa69
1fcb1dd728
aaa8f96cc9
671bf589e2
e1b240d6bd
18a54ed59c
0c66bc948a
a12d359c1a
2d12e69c9f
33ab36d6b3
fa10084a82
43ce8f7d24
87d9d96ab0
a5ecbf4d22
99aa9bb766
6442025060
ac6624773e
4669418731
ff07aadeb0
cde4767cbd
1fe550cfc7
edef49eeac
dbcd0e7aee
0a87dc9f71
ed3c4e8d8e
3956644474
262ad3bb2f
44d42c2b16
14eb977c15
f8e5145e96
24888a10f6
3a1896db63
47319e065d
b9b6e68903
c6c4d07830
9136c32327
7cb488934b
aea1aa0daa
79caa2f955
87552f2e67
aad639c1fa
bee5153c5b
31a938c73c
9d44956d8c
08d01be870
3660ee30e3
3424a7c2e3
36fda30565
541fda8e90
a8b1fec072
70c3d7783f
fd0904805c
3977a3c6e8
43cfc50bbb
f6ebfaea19
daa8708601
7114360901
fc6192786a
3d6a13f072
5ebe35cc09
5dfec22079
cee2490d84
3ce520ec80
7772535e79
bebad9e22e
b31d51cac6
1d7fb64a6e
ae65811be2
0e873d5cd8
2bc36de638
aca9143c13
e143eed8bc
7d621608dd
997ec12ec8
4a8f744255
49a2f5c55c
07efcaf3b2
6b04087d5f
d752acd960
7f94f2b393
970c938b56
d41ebd79f7
d0fadc859d
40b28dc7e1
44c5f7721a
a13cd44a13
2e89072681
7b5de59256
00b1722fb4
8fdaad4018
fefe6290e5
f925f295f4
cc770330f8
e42f7db450
9a57be4ac4
95831246a2
8af2e62556
6236ebaed5
50d3438b26
366d551cd2
393027d1b1
21d818be87
6d65028804
c0e1c67c78
b9b8ddc160
d96d56ff09
cc7b8cc980
8ca74127d9
fc2942d4e0
874954d8bd
0bfb1bded3
4765040aa3
de0300b1c6
4807ef2af0
c853881610
2450c5a46b
a490e68553
5d2b3687b0
d7e36c99fb
1249d36bdd
287d1656de
39976cd2bf
85f1b6ff8f
1650fa8889
e9e7491f2b
2609d4e252
188c5d4a7c
ff4ec67b90
fee68df273
b5a0d7a188
f7cf9fbe48
ee87eaf9ad
09a0b0a84a
8e6ed32610
dfd2f917dc
4f1dd92056
f10c50958c
40515da6d6
3b9370fcf8
51bf7da729
f1bd611d41
e9f7c05ae1
72156dd7a4
554a933944
8d43d7fa6b
a61b411ceb
d2590dc3cd
274316f89e
e3e3a31989
704e7a2d71
87b7e40a34
901fb577cb
fc8b388eac
9aeda305fd
48e314fbe2
29c636bf80
64b0b54fc8
e8d00f02aa
7b086898ee
292d352ee4
2293a20972
76fa56b62d
e5958a8f08
2dc3e510d4
e7f4963e73
629397f70e
1e6029e81e
2a33c73574
4bf8eec265
dfcb3e17ae
8e9684c029
2c17e9a333
d1d9fd50c2
8912c1fade
332de3f1e3
2160f0041d
3254fa3b50
b73c27ef6b
ec89e7cde1
15f9cb5c4d
ebfc35f887
f47d5ced16
6c78a1166e
8ae63006f1
aeb600bc4a
e0d8418ddc
e6a5f44e61
7130076488
0133759476
86839c826f
f93e0ef4d6
572457e265
49377cdd63
910d5c44fc
0e3d20cb20
a96b75191e
a285cd4d88
4f057d8bb6
1240460547
d9a6b805b3
e88c28941f
9c4984db6b
af7c8ff045
6de52a29a8
ad0ee82f0d
85b9568d0e
826003ecae
e2bd8edb0d
44fde1bdb7
d8f20bfdc1
6ab3b50a3f
6d41219bae
dcbd68a1d4
112fb22152
a60b458179
a9081299dd
48a144954e
c4c8a80958
1b928c1bd5
e34ab5200d
863ca3f185
edcc51cbee
6408132d74
d64dd71200
e0ba440909
269e10a725
149b7e7f03
c51efa8617
52791b1c14
cdbca4061b
c52e221207
e417e8dfc2
6905fe7488
1468 changed files with 100447 additions and 34342 deletions
.DEREK.yml (22 changes, file deleted)

```diff
@@ -1,22 +0,0 @@
-curators:
-- aboch
-- alexellis
-- andrewhsu
-- anonymuse
-- arkodg
-- chanwit
-- ehazlett
-- fntlnz
-- gianarb
-- kolyshkin
-- mgoelzer
-- olljanat
-- programmerq
-- rheinwein
-- ripcurld0
-- thajeztah
-
-features:
-- comments
-- pr_description_required
-
```
.dockerignore

```diff
@@ -1,7 +1,4 @@
 .git
-.go-pkg-cache
-.gopath
-bundles
+bundles/
 cli/winresources/**/winres.json
 cli/winresources/**/*.syso
-vendor/pkg
```
.github/actions/setup-runner/action.yml (new file, 27 lines)

```yaml
name: 'Setup Runner'
description: 'Composite action to set up the GitHub Runner for jobs in the test.yml workflow'

runs:
  using: composite
  steps:
    - run: |
        sudo modprobe ip_vs
        sudo modprobe ipv6
        sudo modprobe ip6table_filter
        sudo modprobe -r overlay
        sudo modprobe overlay redirect_dir=off
      shell: bash
    - run: |
        if [ ! -e /etc/docker/daemon.json ]; then
          echo '{}' | tee /etc/docker/daemon.json >/dev/null
        fi
        DOCKERD_CONFIG=$(jq '.+{"experimental":true,"live-restore":true,"ipv6":true,"fixed-cidr-v6":"2001:db8:1::/64"}' /etc/docker/daemon.json)
        sudo tee /etc/docker/daemon.json <<<"$DOCKERD_CONFIG" >/dev/null
        sudo service docker restart
      shell: bash
    - run: |
        ./contrib/check-config.sh || true
      shell: bash
    - run: |
        docker info
      shell: bash
```
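A composite action like this is consumed with a single `uses:` step. A minimal caller sketch (the job name is illustrative; the checkout must come first, because the action is referenced by its in-repo path rather than an owner/repo ref):

```yaml
jobs:
  example:
    runs-on: ubuntu-20.04
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        # runs the modprobe/daemon.json setup defined in the composite action
        name: Set up runner
        uses: ./.github/actions/setup-runner
```

The test.yml workflow added below uses exactly this pattern in its `validate`, `unit`, `docker-py`, `integration-flaky`, `integration`, and `integration-cli` jobs.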
.github/workflows/.dco.yml (new file, 48 lines)

```yaml
# reusable workflow
name: .dco

# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025

on:
  workflow_call:

env:
  ALPINE_VERSION: 3.16

jobs:
  run:
    runs-on: ubuntu-20.04
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      -
        name: Dump context
        uses: actions/github-script@v6
        with:
          script: |
            console.log(JSON.stringify(context, null, 2));
      -
        name: Get base ref
        id: base-ref
        uses: actions/github-script@v6
        with:
          result-encoding: string
          script: |
            if (/^refs\/pull\//.test(context.ref) && context.payload?.pull_request?.base?.ref != undefined) {
              return context.payload.pull_request.base.ref;
            }
            return context.ref.replace(/^refs\/heads\//g, '');
      -
        name: Validate
        run: |
          docker run --rm \
            -v "$(pwd):/workspace" \
            -e VALIDATE_REPO \
            -e VALIDATE_BRANCH \
            alpine:${{ env.ALPINE_VERSION }} sh -c 'apk add --no-cache -q bash git openssh-client && git config --system --add safe.directory /workspace && cd /workspace && hack/validate/dco'
        env:
          VALIDATE_REPO: ${{ github.server_url }}/${{ github.repository }}.git
          VALIDATE_BRANCH: ${{ steps.base-ref.outputs.result }}
```
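Because its only trigger is `workflow_call`, this workflow never runs on its own; each CI workflow embeds it as a gating job. A minimal caller sketch (matching the `validate-dco` jobs that the ci.yml and test.yml diffs below add; the `build` job is a placeholder):

```yaml
jobs:
  validate-dco:
    # reusable workflows are invoked with `uses` at the job level;
    # this one takes no inputs
    uses: ./.github/workflows/.dco.yml

  build:
    runs-on: ubuntu-20.04
    needs:
      - validate-dco   # downstream jobs wait for the DCO check to pass
    steps: []          # real build steps go here
```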
.github/workflows/.windows.yml

```diff
@@ -1,21 +1,21 @@
-name: windows
+# reusable workflow
+name: .windows
 
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
+# TODO: hide reusable workflow from the UI. Tracked in https://github.com/community/community/discussions/12025
 
 on:
-  workflow_dispatch:
-  push:
-    branches:
-      - 'master'
-      - '[0-9]+.[0-9]{2}'
-    tags:
-      - 'v*'
-  pull_request:
+  workflow_call:
+    inputs:
+      os:
+        required: true
+        type: string
+      send_coverage:
+        required: false
+        type: boolean
+        default: false
 
 env:
-  GO_VERSION: 1.18.3
+  GO_VERSION: 1.19.3
   GOTESTLIST_VERSION: v0.2.0
   TESTSTAT_VERSION: v0.1.3
   WINDOWS_BASE_IMAGE: mcr.microsoft.com/windows/servercore
@@ -24,16 +24,11 @@ env:
   TEST_IMAGE_NAME: moby:test
   TEST_CTN_NAME: moby
   DOCKER_BUILDKIT: 0
+  ITG_CLI_MATRIX_SIZE: 6
 
 jobs:
   build:
-    runs-on: ${{ matrix.os }}
-    strategy:
-      fail-fast: false
-      matrix:
-        os:
-          - windows-2019
-          - windows-2022
+    runs-on: ${{ inputs.os }}
     env:
       GOPATH: ${{ github.workspace }}\go
       GOBIN: ${{ github.workspace }}\go\bin
@@ -56,9 +51,9 @@ jobs:
         run: |
           New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
           New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
-          If ("${{ matrix.os }}" -eq "windows-2019") {
+          If ("${{ inputs.os }}" -eq "windows-2019") {
             echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
-          } ElseIf ("${{ matrix.os }}" -eq "windows-2022") {
+          } ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
             echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
           }
       -
@@ -70,9 +65,9 @@ jobs:
             ~\go\pkg\mod
             ${{ github.workspace }}\go-build
             ${{ env.GOPATH }}\pkg\mod
-          key: ${{ matrix.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
+          key: ${{ inputs.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
           restore-keys: |
-            ${{ matrix.os }}-${{ github.job }}-
+            ${{ inputs.os }}-${{ github.job }}-
       -
         name: Docker info
         run: |
@@ -103,19 +98,14 @@ jobs:
         name: Upload artifacts
         uses: actions/upload-artifact@v3
         with:
-          name: build-${{ matrix.os }}
+          name: build-${{ inputs.os }}
           path: ${{ env.BIN_OUT }}/*
           if-no-files-found: error
           retention-days: 2
 
   unit-test:
-    runs-on: ${{ matrix.os }}
-    strategy:
-      fail-fast: false
-      matrix:
-        os:
-          - windows-2019
-          - windows-2022
+    runs-on: ${{ inputs.os }}
+    timeout-minutes: 120
     env:
       GOPATH: ${{ github.workspace }}\go
       GOBIN: ${{ github.workspace }}\go\bin
@@ -138,9 +128,9 @@ jobs:
           New-Item -ItemType "directory" -Path "${{ github.workspace }}\go-build"
           New-Item -ItemType "directory" -Path "${{ github.workspace }}\go\pkg\mod"
           New-Item -ItemType "directory" -Path "bundles"
-          If ("${{ matrix.os }}" -eq "windows-2019") {
+          If ("${{ inputs.os }}" -eq "windows-2019") {
             echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
-          } ElseIf ("${{ matrix.os }}" -eq "windows-2022") {
+          } ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
             echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
           }
       -
@@ -152,9 +142,9 @@ jobs:
             ~\go\pkg\mod
             ${{ github.workspace }}\go-build
             ${{ env.GOPATH }}\pkg\mod
-          key: ${{ matrix.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
+          key: ${{ inputs.os }}-${{ github.job }}-${{ hashFiles('**/vendor.sum') }}
           restore-keys: |
-            ${{ matrix.os }}-${{ github.job }}-
+            ${{ inputs.os }}-${{ github.job }}-
       -
         name: Docker info
         run: |
@@ -175,7 +165,7 @@ jobs:
             ${{ env.TEST_IMAGE_NAME }} hack\make.ps1 -TestUnit
       -
         name: Send to Codecov
-        if: matrix.os == 'windows-2022'
+        if: inputs.send_coverage
         uses: codecov/codecov-action@v3
         with:
           working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
@@ -187,7 +177,7 @@ jobs:
         if: always()
         uses: actions/upload-artifact@v3
         with:
-          name: ${{ matrix.os }}-unit-reports
+          name: ${{ inputs.os }}-unit-reports
           path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
 
   unit-test-report:
@@ -195,12 +185,6 @@ jobs:
     if: always()
     needs:
       - unit-test
-    strategy:
-      fail-fast: false
-      matrix:
-        os:
-          - windows-2019
-          - windows-2022
     steps:
       -
         name: Set up Go
@@ -211,7 +195,7 @@ jobs:
         name: Download artifacts
         uses: actions/download-artifact@v3
         with:
-          name: ${{ matrix.os }}-unit-reports
+          name: ${{ inputs.os }}-unit-reports
           path: /tmp/artifacts
       -
         name: Install teststat
@@ -246,25 +230,23 @@ jobs:
         run: |
          # Distribute integration-cli tests for the matrix in integration-test job.
          # Also prepend ./... to the matrix. This is a special case to run "Test integration" step exclusively.
-          matrix="$(gotestlist -d 4 ./...)"
+          matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} ./...)"
           matrix="$(echo "$matrix" | jq -c '. |= ["./..."] + .')"
-          echo "::set-output name=matrix::$matrix"
+          echo "matrix=$matrix" >> $GITHUB_OUTPUT
       -
         name: Show matrix
         run: |
           echo ${{ steps.tests.outputs.matrix }}
 
   integration-test:
-    runs-on: ${{ matrix.os }}
+    runs-on: ${{ inputs.os }}
+    timeout-minutes: 120
     needs:
       - build
       - integration-test-prepare
     strategy:
       fail-fast: false
       matrix:
-        os:
-          - windows-2019
-          - windows-2022
         runtime:
           - builtin
           - containerd
@@ -290,15 +272,15 @@ jobs:
         name: Download artifacts
         uses: actions/download-artifact@v3
         with:
-          name: build-${{ matrix.os }}
+          name: build-${{ inputs.os }}
           path: ${{ env.BIN_OUT }}
       -
         name: Init
         run: |
           New-Item -ItemType "directory" -Path "bundles"
-          If ("${{ matrix.os }}" -eq "windows-2019") {
+          If ("${{ inputs.os }}" -eq "windows-2019") {
             echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2019 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
-          } ElseIf ("${{ matrix.os }}" -eq "windows-2022") {
+          } ElseIf ("${{ inputs.os }}" -eq "windows-2022") {
             echo "WINDOWS_BASE_IMAGE_TAG=${{ env.WINDOWS_BASE_TAG_2022 }}" | Out-File -FilePath $Env:GITHUB_ENV -Encoding utf-8 -Append
           }
           Write-Output "${{ env.BIN_OUT }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
@@ -315,7 +297,7 @@ jobs:
           Stop-Service -Force -Name docker
           Remove-Service -Name docker
           # removes event log entry. we could use "Remove-EventLog -LogName -Source docker"
-          # but this cmd is only available since windows-2022
+          # but this cmd is not available atm
           $ErrorActionPreference = "SilentlyContinue"
           & reg delete "HKLM\SYSTEM\CurrentControlSet\Services\EventLog\Application\docker" /f 2>&1 | Out-Null
           $ErrorActionPreference = "Stop"
@@ -431,7 +413,7 @@ jobs:
           INTEGRATION_TESTRUN: ${{ matrix.test }}
       -
         name: Send to Codecov
-        if: matrix.os == 'windows-2022'
+        if: inputs.send_coverage
         uses: codecov/codecov-action@v3
         with:
           working-directory: ${{ env.GOPATH }}\src\github.com\docker\docker
@@ -480,7 +462,7 @@ jobs:
         if: always()
         uses: actions/upload-artifact@v3
         with:
-          name: ${{ matrix.os }}-integration-reports-${{ matrix.runtime }}
+          name: ${{ inputs.os }}-integration-reports-${{ matrix.runtime }}
           path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
 
   integration-test-report:
@@ -491,9 +473,6 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os:
-          - windows-2019
-          - windows-2022
         runtime:
           - builtin
           - containerd
@@ -507,7 +486,7 @@ jobs:
         name: Download artifacts
         uses: actions/download-artifact@v3
         with:
-          name: ${{ matrix.os }}-integration-reports-${{ matrix.runtime }}
+          name: ${{ inputs.os }}-integration-reports-${{ matrix.runtime }}
           path: /tmp/artifacts
       -
         name: Install teststat
```
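Two mechanical changes recur throughout the hunks above. First, every `matrix.os` reference becomes `inputs.os`, since the OS now arrives from the calling workflow rather than a per-job matrix. Second, `::set-output` becomes a write to `$GITHUB_OUTPUT`; GitHub deprecated the workflow command in October 2022 in favor of environment files. A generic sketch of the output migration (step and values are illustrative, not taken from this diff):

```yaml
steps:
  -
    id: tests
    run: |
      matrix='["./..."]'
      # deprecated workflow command, written to stdout:
      #   echo "::set-output name=matrix::$matrix"
      # replacement, appended to the step's output file:
      echo "matrix=$matrix" >> $GITHUB_OUTPUT
  -
    run: echo '${{ steps.tests.outputs.matrix }}'
```

That output feeds the test fan-out used here and in test.yml below: `gotestlist -d ${ITG_CLI_MATRIX_SIZE}` emits a JSON array whose entries are `|`-joined groups of test names, jq prepends the `./...` sentinel (the special case that runs the "Test integration" step exclusively), and each entry becomes one matrix job that selects its tests with a `-test.run` alternation. A sketch of the hand-off, with illustrative rather than actual test names:

```yaml
# Suppose the prepare job emitted:
#   matrix='["./...","TestDockerRegistrySuite|TestDockerDaemonSuite"]'
jobs:
  integration-cli:
    runs-on: ubuntu-20.04
    needs:
      - integration-cli-prepare
    strategy:
      matrix:
        test: ${{ fromJson(needs.integration-cli-prepare.outputs.matrix) }}
    steps:
      -
        name: Test
        run: make -o build test-integration
        env:
          # expands to e.g. "-test.run (TestDockerRegistrySuite|TestDockerDaemonSuite)/"
          TESTFLAGS: "-test.run (${{ matrix.test }})/"
```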
.github/workflows/buildkit.yml (new file, 113 lines)

```yaml
name: buildkit

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  workflow_dispatch:
  push:
    branches:
      - 'master'
      - '[0-9]+.[0-9]{2}'
  pull_request:

env:
  BUNDLES_OUTPUT: ./bundles

jobs:
  validate-dco:
    uses: ./.github/workflows/.dco.yml

  build:
    runs-on: ubuntu-20.04
    needs:
      - validate-dco
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Build
        uses: docker/bake-action@v2
        with:
          targets: binary
      -
        name: Upload artifacts
        uses: actions/upload-artifact@v3
        with:
          name: binary
          path: ${{ env.BUNDLES_OUTPUT }}
          if-no-files-found: error
          retention-days: 1

  test:
    runs-on: ubuntu-20.04
    timeout-minutes: 120
    needs:
      - build
    strategy:
      fail-fast: false
      matrix:
        pkg:
          - client
          - cmd/buildctl
          - solver
          - frontend
          - frontend/dockerfile
        typ:
          - integration
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          path: moby
      -
        name: BuildKit ref
        run: |
          ./hack/go-mod-prepare.sh
          # FIXME(thaJeztah) temporarily overriding version to use for tests; remove with the next release of buildkit
          # echo "BUILDKIT_REF=$(./hack/buildkit-ref)" >> $GITHUB_ENV
          echo "BUILDKIT_REF=4febae4f874bd8ef52dec30e988c8fe0bc96b3b9" >> $GITHUB_ENV
        working-directory: moby
      -
        name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
        uses: actions/checkout@v3
        with:
          repository: "moby/buildkit"
          ref: ${{ env.BUILDKIT_REF }}
          path: buildkit
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Download binary artifacts
        uses: actions/download-artifact@v3
        with:
          name: binary
          path: ./buildkit/build/moby/
      -
        name: Update daemon.json
        run: |
          sudo rm /etc/docker/daemon.json
          sudo service docker restart
          docker version
          docker info
      -
        name: Test
        run: |
          ./hack/test ${{ matrix.typ }}
        env:
          CONTEXT: "."
          TEST_DOCKERD: "1"
          TEST_DOCKERD_BINARY: "./build/moby/binary-daemon/dockerd"
          TESTPKGS: "./${{ matrix.pkg }}"
          TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=dockerd$"
        working-directory: buildkit
```
.github/workflows/ci.yml
vendored
96
.github/workflows/ci.yml
vendored
|
@ -9,7 +9,7 @@ on:
|
||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'master'
|
- 'master'
|
||||||
- '[0-9]+.[0-9]{2}'
|
- '[0-9]+.[0-9]+'
|
||||||
tags:
|
tags:
|
||||||
- 'v*'
|
- 'v*'
|
||||||
pull_request:
|
pull_request:
|
||||||
|
@ -18,8 +18,13 @@ env:
|
||||||
BUNDLES_OUTPUT: ./bundles
|
BUNDLES_OUTPUT: ./bundles
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
validate-dco:
|
||||||
|
uses: ./.github/workflows/.dco.yml
|
||||||
|
|
||||||
build:
|
build:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-20.04
|
||||||
|
needs:
|
||||||
|
- validate-dco
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
|
@ -34,15 +39,12 @@ jobs:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v1
|
uses: docker/setup-buildx-action@v2
|
||||||
-
|
-
|
||||||
name: Build
|
name: Build
|
||||||
uses: docker/bake-action@v1
|
uses: docker/bake-action@v2
|
||||||
with:
|
with:
|
||||||
targets: ${{ matrix.target }}
|
targets: ${{ matrix.target }}
|
||||||
set: |
|
|
||||||
*.cache-from=type=gha,scope=build-${{ matrix.target }}
|
|
||||||
*.cache-to=type=gha,scope=build-${{ matrix.target }}
|
|
||||||
-
|
-
|
||||||
name: Upload artifacts
|
name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v3
|
||||||
|
@ -54,6 +56,8 @@ jobs:
|
||||||
|
|
||||||
cross:
|
cross:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-20.04
|
||||||
|
needs:
|
||||||
|
- validate-dco
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
|
@ -80,93 +84,19 @@ jobs:
|
||||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v1
|
uses: docker/setup-buildx-action@v2
|
||||||
-
|
-
|
||||||
name: Build
|
name: Build
|
||||||
uses: docker/bake-action@v1
|
uses: docker/bake-action@v2
|
||||||
with:
|
with:
|
||||||
targets: cross
|
targets: cross
|
||||||
set: |
|
|
||||||
*.cache-from=type=gha,scope=cross-${{ env.PLATFORM_PAIR }}
|
|
||||||
*.cache-to=type=gha,scope=cross-${{ env.PLATFORM_PAIR }}
|
|
||||||
env:
|
env:
|
||||||
DOCKER_CROSSPLATFORMS: ${{ matrix.platform }}
|
DOCKER_CROSSPLATFORMS: ${{ matrix.platform }}
|
||||||
-
|
-
|
||||||
name: Upload artifacts
|
name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v3
|
||||||
with:
|
with:
|
||||||
name: cross-${{ env.PLATFORM_PAIR }}
|
name: cross-${{ env.PLATFORM_PAIR }}
|
||||||
path: ${{ env.BUNDLES_OUTPUT }}
|
path: ${{ env.BUNDLES_OUTPUT }}
|
||||||
if-no-files-found: error
|
if-no-files-found: error
|
||||||
retention-days: 7
|
retention-days: 7
|
||||||
|
|
||||||
test-buildkit:
|
|
||||||
needs:
|
|
||||||
- build
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
pkg:
|
|
||||||
- ./client
|
|
||||||
- ./cmd/buildctl
|
|
||||||
- ./solver
|
|
||||||
- ./frontend
|
|
||||||
- ./frontend/dockerfile
|
|
||||||
typ:
|
|
||||||
- integration
|
|
||||||
include:
|
|
||||||
- pkg: ./...
|
|
||||||
skip-integration-tests: 1
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
path: moby
|
|
||||||
-
|
|
||||||
name: BuildKit ref
|
|
||||||
run: |
|
|
||||||
./hack/go-mod-prepare.sh
|
|
||||||
echo "BUILDKIT_REF=0da740f7d4f782a52b416a44f564ac37504b9ee1" >> $GITHUB_ENV
|
|
||||||
# FIXME(thaJeztah) temporarily overriding version to use for tests to include https://github.com/moby/buildkit/pull/2872
|
|
||||||
# echo "BUILDKIT_REF=$(./hack/buildkit-ref)" >> $GITHUB_ENV
|
|
||||||
working-directory: moby
|
|
||||||
-
|
|
||||||
name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
repository: "moby/buildkit"
|
|
||||||
ref: ${{ env.BUILDKIT_REF }}
|
|
||||||
path: buildkit
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
-
|
|
||||||
name: Download binary artifacts
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: binary
|
|
||||||
path: ./buildkit/build/moby/
|
|
||||||
-
|
|
||||||
name: Update daemon.json
|
|
||||||
run: |
|
|
||||||
sudo rm /etc/docker/daemon.json
|
|
||||||
sudo service docker restart
|
|
||||||
docker version
|
|
||||||
docker info
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
run: |
|
|
||||||
./hack/test ${{ matrix.typ }}
|
|
||||||
env:
|
|
||||||
CONTEXT: "."
|
|
||||||
TEST_DOCKERD: "1"
|
|
||||||
TEST_DOCKERD_BINARY: "./build/moby/binary-daemon/dockerd"
|
|
||||||
TESTPKGS: "${{ matrix.pkg }}"
|
|
||||||
TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=dockerd$"
|
|
||||||
SKIP_INTEGRATION_TESTS: "${{ matrix.skip-integration-tests }}"
|
|
||||||
working-directory: buildkit
|
|
||||||
|
|
.github/workflows/test.yml (new file, 504 lines)

```yaml
name: test

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  workflow_dispatch:
  push:
    branches:
      - 'master'
      - '[0-9]+.[0-9]+'
    tags:
      - 'v*'
  pull_request:

env:
  GO_VERSION: 1.19.3
  GOTESTLIST_VERSION: v0.2.0
  TESTSTAT_VERSION: v0.1.3
  ITG_CLI_MATRIX_SIZE: 6
  DOCKER_EXPERIMENTAL: 1
  DOCKER_GRAPHDRIVER: overlay2

jobs:
  validate-dco:
    uses: ./.github/workflows/.dco.yml

  build-dev:
    runs-on: ubuntu-20.04
    needs:
      - validate-dco
    strategy:
      fail-fast: false
      matrix:
        mode:
          - ""
          - systemd
    steps:
      -
        name: Prepare
        run: |
          if [ "${{ matrix.mode }}" = "systemd" ]; then
            echo "SYSTEMD=true" >> $GITHUB_ENV
          fi
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Build dev image
        uses: docker/bake-action@v2
        with:
          targets: dev
          set: |
            *.cache-from=type=gha,scope=dev${{ matrix.mode }}
            *.cache-to=type=gha,scope=dev${{ matrix.mode }},mode=max
            *.output=type=cacheonly

  validate-prepare:
    runs-on: ubuntu-20.04
    needs:
      - validate-dco
    outputs:
      matrix: ${{ steps.scripts.outputs.matrix }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Create matrix
        id: scripts
        run: |
          scripts=$(jq -ncR '[inputs]' <<< "$(ls -I .validate -I all -I default -I dco -I golangci-lint.yml -I yamllint.yaml -A ./hack/validate/)")
          echo "matrix=$scripts" >> $GITHUB_OUTPUT
      -
        name: Show matrix
        run: |
          echo ${{ steps.scripts.outputs.matrix }}

  validate:
    runs-on: ubuntu-20.04
    needs:
      - validate-prepare
      - build-dev
    strategy:
      fail-fast: true
      matrix:
        script: ${{ fromJson(needs.validate-prepare.outputs.matrix) }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Build dev image
        uses: docker/bake-action@v2
        with:
          targets: dev
          set: |
            dev.cache-from=type=gha,scope=dev
      -
        name: Validate
        run: |
          make -o build validate-${{ matrix.script }}

  unit:
    runs-on: ubuntu-20.04
    timeout-minutes: 120
    needs:
      - build-dev
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Build dev image
        uses: docker/bake-action@v2
        with:
          targets: dev
          set: |
            dev.cache-from=type=gha,scope=dev
      -
        name: Test
        run: |
          make -o build test-unit
      -
        name: Prepare reports
        if: always()
        run: |
          mkdir -p bundles /tmp/reports
          find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
          tar -xzf /tmp/reports.tar.gz -C /tmp/reports
          sudo chown -R $(id -u):$(id -g) /tmp/reports
          tree -nh /tmp/reports
      -
        name: Send to Codecov
        uses: codecov/codecov-action@v3
        with:
          directory: ./bundles
          env_vars: RUNNER_OS
          flags: unit
      -
        name: Upload reports
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: unit-reports
          path: /tmp/reports/*

  unit-report:
    runs-on: ubuntu-20.04
    if: always()
    needs:
      - unit
    steps:
      -
        name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: ${{ env.GO_VERSION }}
      -
        name: Download reports
        uses: actions/download-artifact@v3
        with:
          name: unit-reports
          path: /tmp/reports
      -
        name: Install teststat
        run: |
          go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
      -
        name: Create summary
        run: |
          teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY

  docker-py:
    runs-on: ubuntu-20.04
    timeout-minutes: 120
    needs:
      - build-dev
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Build dev image
        uses: docker/bake-action@v2
        with:
          targets: dev
          set: |
            dev.cache-from=type=gha,scope=dev
      -
        name: Test
        run: |
          make -o build test-docker-py
      -
        name: Prepare reports
        if: always()
        run: |
          mkdir -p bundles /tmp/reports
          find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
          tar -xzf /tmp/reports.tar.gz -C /tmp/reports
          sudo chown -R $(id -u):$(id -g) /tmp/reports
          tree -nh /tmp/reports
      -
        name: Test daemon logs
        if: always()
        run: |
          cat bundles/test-docker-py/docker.log
      -
        name: Upload reports
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: docker-py-reports
          path: /tmp/reports/*

  integration-flaky:
    runs-on: ubuntu-20.04
    timeout-minutes: 120
    needs:
      - build-dev
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Build dev image
        uses: docker/bake-action@v2
        with:
          targets: dev
          set: |
            dev.cache-from=type=gha,scope=dev
      -
        name: Test
        run: |
          make -o build test-integration-flaky
        env:
          TEST_SKIP_INTEGRATION_CLI: 1

  integration:
    runs-on: ${{ matrix.os }}
    timeout-minutes: 120
    needs:
      - build-dev
    strategy:
      fail-fast: false
      matrix:
        os:
          - ubuntu-20.04
          - ubuntu-22.04
        mode:
          - ""
          - rootless
          - systemd
          #- rootless-systemd FIXME: https://github.com/moby/moby/issues/44084
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
      -
        name: Prepare
        run: |
          CACHE_DEV_SCOPE=dev
          if [[ "${{ matrix.mode }}" == *"rootless"* ]]; then
            echo "DOCKER_ROOTLESS=1" >> $GITHUB_ENV
          fi
          if [[ "${{ matrix.mode }}" == *"systemd"* ]]; then
            echo "SYSTEMD=true" >> $GITHUB_ENV
            CACHE_DEV_SCOPE="${CACHE_DEV_SCOPE}systemd"
          fi
          echo "CACHE_DEV_SCOPE=${CACHE_DEV_SCOPE}" >> $GITHUB_ENV
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Build dev image
        uses: docker/bake-action@v2
        with:
          targets: dev
          set: |
            dev.cache-from=type=gha,scope=${{ env.CACHE_DEV_SCOPE }}
      -
        name: Test
        run: |
          make -o build test-integration
        env:
          TEST_SKIP_INTEGRATION_CLI: 1
          TESTCOVERAGE: 1
      -
        name: Prepare reports
        if: always()
        run: |
          reportsPath="/tmp/reports/${{ matrix.os }}"
          if [ -n "${{ matrix.mode }}" ]; then
            reportsPath="$reportsPath-${{ matrix.mode }}"
          fi
          mkdir -p bundles $reportsPath
          find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
          tar -xzf /tmp/reports.tar.gz -C $reportsPath
          sudo chown -R $(id -u):$(id -g) $reportsPath
          tree -nh $reportsPath
      -
        name: Send to Codecov
        uses: codecov/codecov-action@v3
        with:
          directory: ./bundles/test-integration
          env_vars: RUNNER_OS
          flags: integration,${{ matrix.mode }}
      -
        name: Test daemon logs
        if: always()
        run: |
          cat bundles/test-integration/docker.log
      -
        name: Upload reports
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: integration-reports
          path: /tmp/reports/*

  integration-report:
    runs-on: ubuntu-20.04
    if: always()
    needs:
      - integration
    steps:
      -
        name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: ${{ env.GO_VERSION }}
      -
        name: Download reports
        uses: actions/download-artifact@v3
        with:
          name: integration-reports
          path: /tmp/reports
      -
        name: Install teststat
        run: |
          go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
      -
        name: Create summary
        run: |
          teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY

  integration-cli-prepare:
    runs-on: ubuntu-20.04
    needs:
      - validate-dco
    outputs:
      matrix: ${{ steps.tests.outputs.matrix }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: ${{ env.GO_VERSION }}
      -
        name: Install gotestlist
        run:
          go install github.com/crazy-max/gotestlist/cmd/gotestlist@${{ env.GOTESTLIST_VERSION }}
      -
        name: Create matrix
        id: tests
        working-directory: ./integration-cli
        run: |
          # Distribute integration-cli tests for the matrix in integration-test job.
          # Also prepend ./... to the matrix. This is a special case to run "Test integration" step exclusively.
          matrix="$(gotestlist -d ${{ env.ITG_CLI_MATRIX_SIZE }} ./...)"
          matrix="$(echo "$matrix" | jq -c '. |= ["./..."] + .')"
          echo "matrix=$matrix" >> $GITHUB_OUTPUT
      -
        name: Show matrix
        run: |
          echo ${{ steps.tests.outputs.matrix }}

  integration-cli:
    runs-on: ubuntu-20.04
    timeout-minutes: 120
    needs:
      - build-dev
      - integration-cli-prepare
    strategy:
      fail-fast: false
      matrix:
        test: ${{ fromJson(needs.integration-cli-prepare.outputs.matrix) }}
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
      -
        name: Set up runner
        uses: ./.github/actions/setup-runner
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Build dev image
        uses: docker/bake-action@v2
        with:
          targets: dev
          set: |
            dev.cache-from=type=gha,scope=dev
      -
        name: Test
        run: |
          make -o build test-integration
        env:
          TEST_SKIP_INTEGRATION: 1
          TESTCOVERAGE: 1
          TESTFLAGS: "-test.run (${{ matrix.test }})/"
      -
        name: Prepare reports
        if: always()
        run: |
          reportsPath=/tmp/reports/$(echo -n "${{ matrix.test }}" | sha256sum | cut -d " " -f 1)
          mkdir -p bundles $reportsPath
          echo "${{ matrix.test }}" | tr -s '|' '\n' | tee -a "$reportsPath/tests.txt"
          find bundles -path '*/root/*overlay2' -prune -o -type f \( -name '*-report.json' -o -name '*.log' -o -name '*.out' -o -name '*.prof' -o -name '*-report.xml' \) -print | xargs sudo tar -czf /tmp/reports.tar.gz
          tar -xzf /tmp/reports.tar.gz -C $reportsPath
          sudo chown -R $(id -u):$(id -g) $reportsPath
          tree -nh $reportsPath
      -
        name: Send to Codecov
        uses: codecov/codecov-action@v3
        with:
          directory: ./bundles/test-integration
          env_vars: RUNNER_OS
          flags: integration-cli
      -
        name: Test daemon logs
        if: always()
        run: |
          cat bundles/test-integration/docker.log
      -
        name: Upload reports
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: integration-cli-reports
          path: /tmp/reports/*

  integration-cli-report:
    runs-on: ubuntu-20.04
    if: always()
    needs:
      - integration-cli
    steps:
      -
        name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: ${{ env.GO_VERSION }}
      -
        name: Download reports
        uses: actions/download-artifact@v3
        with:
          name: integration-cli-reports
          path: /tmp/reports
      -
        name: Install teststat
        run: |
          go install github.com/vearutop/teststat@${{ env.TESTSTAT_VERSION }}
      -
        name: Create summary
        run: |
          teststat -markdown $(find /tmp/reports -type f -name '*.json' -print0 | xargs -0) >> $GITHUB_STEP_SUMMARY
```

.github/workflows/windows-2019.yml (new file, 22 lines)

```yaml
name: windows-2019

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  schedule:
    - cron: '0 10 * * *'
  workflow_dispatch:

jobs:
  validate-dco:
    uses: ./.github/workflows/.dco.yml

  run:
    needs:
      - validate-dco
    uses: ./.github/workflows/.windows.yml
    with:
      os: windows-2019
      send_coverage: false
```

.github/workflows/windows-2022.yml (new file, 25 lines)

```yaml
name: windows-2022

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  workflow_dispatch:
  push:
    branches:
      - 'master'
      - '[0-9]+.[0-9]+'
  pull_request:

jobs:
  validate-dco:
    uses: ./.github/workflows/.dco.yml

  run:
    needs:
      - validate-dco
    uses: ./.github/workflows/.windows.yml
    with:
      os: windows-2022
      send_coverage: true
```

.gitignore (41 changes)

```diff
@@ -1,27 +1,30 @@
-# Docker project generated files to ignore
-# if you want to ignore files created by your editor/tools,
-# please consider a global .gitignore https://help.github.com/articles/ignoring-files
-*.exe
-*.exe~
-*.gz
+# If you want to ignore files created by your editor/tools, please consider a
+# [global .gitignore](https://help.github.com/articles/ignoring-files).
+
+*~
+*.bak
 *.orig
-test.main
 .*.swp
 .DS_Store
-# a .bashrc may be added to customize the build environment
+thumbs.db
+
+# local repository customization
+.envrc
 .bashrc
 .editorconfig
-.gopath/
-.go-pkg-cache/
-bundles/
-cli/winresources/**/winres.json
-cli/winresources/**/*.syso
-cmd/dockerd/dockerd
-contrib/builder/rpm/*/changelog
-vendor/pkg/
-go-test-report.json
-profile.out
-junit-report.xml
 
 # top-level go.mod is not meant to be checked in
 /go.mod
+
+# build artifacts
+bundles/
+cli/winresources/*/*.syso
+cli/winresources/*/winres.json
+contrib/builder/rpm/*/changelog
+
+# ci artifacts
+*.exe
+*.gz
+go-test-report.json
+junit-report.xml
+profile.out
+test.main
```

.mailmap (48 changes)

```diff
@@ -1,14 +1,14 @@
-# Generate AUTHORS: hack/generate-authors.sh
-# Tip for finding duplicates (besides scanning the output of AUTHORS for name
-# duplicates that aren't also email duplicates): scan the output of:
-# git log --format='%aE - %aN' | sort -uf
+# This file lists the canonical name and email of contributors, and is used to
+# generate AUTHORS (in hack/generate-authors.sh).
 #
-# For explanation on this file format: man git-shortlog
-<21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
-<mr.wrfly@gmail.com> <wrfly@users.noreply.github.com>
+# To find new duplicates, regenerate AUTHORS and scan for name duplicates, or
+# run the following to find email duplicates:
+# git log --format='%aE - %aN' | sort -uf | awk -v IGNORECASE=1 '$1 in a {print a[$1]; print}; {a[$1]=$0}'
+#
+# For an explanation of this file format, consult gitmailmap(5).
+
 Aaron L. Xu <liker.xu@foxmail.com>
+Aaron L. Xu <liker.xu@foxmail.com> <likexu@harmonycloud.cn>
 Aaron Lehmann <alehmann@netflix.com>
 Aaron Lehmann <alehmann@netflix.com> <aaron.lehmann@docker.com>
 Abhinandan Prativadi <aprativadi@gmail.com>
@@ -37,7 +37,6 @@ Aleksa Sarai <asarai@suse.de> <asarai@suse.com>
 Aleksa Sarai <asarai@suse.de> <cyphar@cyphar.com>
 Aleksandrs Fadins <aleks@s-ko.net>
 Alessandro Boch <aboch@tetrationanalytics.com>
-Alessandro Boch <aboch@tetrationanalytics.com>
 Alessandro Boch <aboch@tetrationanalytics.com> <aboch@docker.com>
 Alessandro Boch <aboch@tetrationanalytics.com> <aboch@socketplane.io>
 Alessandro Boch <aboch@tetrationanalytics.com> <aboch@users.noreply.github.com>
@@ -50,6 +49,7 @@ Alexander Larsson <alexl@redhat.com> <alexander.larsson@gmail.com>
 Alexander Morozov <lk4d4math@gmail.com>
 Alexander Morozov <lk4d4math@gmail.com> <lk4d4@docker.com>
 Alexandre Beslic <alexandre.beslic@gmail.com> <abronan@docker.com>
+Alexandre González <agonzalezro@gmail.com>
 Alexis Ries <ries.alexis@gmail.com>
 Alexis Ries <ries.alexis@gmail.com> <alexis.ries.ext@orange.com>
 Alexis Thomas <fr.alexisthomas@gmail.com>
@@ -67,6 +67,8 @@ Andrey Kolomentsev <andrey.kolomentsev@docker.com> <andrey.kolomentsev@gmail.com
 André Martins <aanm90@gmail.com> <martins@noironetworks.com>
 Andy Rothfusz <github@developersupport.net> <github@metaliveblog.com>
 Andy Smith <github@anarkystic.com>
+Andy Zhang <andy.zhangtao@hotmail.com>
+Andy Zhang <andy.zhangtao@hotmail.com> <ztao@tibco-support.com>
 Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
 Antonio Murdaca <antonio.murdaca@gmail.com> <amurdaca@redhat.com>
 Antonio Murdaca <antonio.murdaca@gmail.com> <me@runcom.ninja>
@@ -85,6 +87,7 @@ Arnaud Porterie <icecrime@gmail.com> <arnaud.porterie@docker.com>
 Arnaud Rebillout <arnaud.rebillout@collabora.com>
 Arnaud Rebillout <arnaud.rebillout@collabora.com> <elboulangero@gmail.com>
 Arthur Gautier <baloo@gandi.net> <superbaloo+registrations.github@superbaloo.net>
+Artur Meyster <arthurfbi@yahoo.com>
 Avi Miller <avi.miller@oracle.com> <avi.miller@gmail.com>
 Ben Bonnefoy <frenchben@docker.com>
 Ben Golub <ben.golub@dotcloud.com>
@@ -101,7 +104,9 @@ Bin Liu <liubin0329@gmail.com>
 Bin Liu <liubin0329@gmail.com> <liubin0329@users.noreply.github.com>
 Bingshen Wang <bingshen.wbs@alibaba-inc.com>
 Boaz Shuster <ripcurld.github@gmail.com>
+Bojun Zhu <bojun.zhu@foxmail.com>
 Boqin Qin <bobbqqin@gmail.com>
+Boshi Lian <farmer1992@gmail.com>
 Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.co>
 Brandon Philips <brandon.philips@coreos.com> <brandon@ifup.org>
 Brent Salisbury <brent.salisbury@docker.com> <brent@docker.com>
@@ -240,31 +245,36 @@ Gurjeet Singh <gurjeet@singh.im> <singh.gurjeet@gmail.com>
 Gustav Sinder <gustav.sinder@gmail.com>
 Günther Jungbluth <gunther@gameslabs.net>
 Hakan Özler <hakan.ozler@kodcu.com>
-Hao Shu Wei <haosw@cn.ibm.com>
-Hao Shu Wei <haosw@cn.ibm.com> <haoshuwei1989@163.com>
+Hao Shu Wei <haoshuwei24@gmail.com>
+Hao Shu Wei <haoshuwei24@gmail.com> <haoshuwei1989@163.com>
+Hao Shu Wei <haoshuwei24@gmail.com> <haosw@cn.ibm.com>
 Harald Albers <github@albersweb.de> <albers@users.noreply.github.com>
 Harald Niesche <harald@niesche.de>
 Harold Cooper <hrldcpr@gmail.com>
+Harry Zhang <harryz@hyper.sh>
 Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
 Harry Zhang <harryz@hyper.sh> <resouer@163.com>
 Harry Zhang <harryz@hyper.sh> <resouer@gmail.com>
-Harry Zhang <resouer@163.com>
 Harshal Patil <harshal.patil@in.ibm.com> <harche@users.noreply.github.com>
+He Simei <hesimei@zju.edu.cn>
 Helen Xie <chenjg@harmonycloud.cn>
 Hiroyuki Sasagawa <hs19870702@gmail.com>
 Hollie Teal <hollie@docker.com>
 Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
 Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
+hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
 Hu Keping <hukeping@huawei.com>
 Hui Kang <hkang.sunysb@gmail.com>
 Hui Kang <hkang.sunysb@gmail.com> <kangh@us.ibm.com>
 Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
+Hyeongkyu Lee <hyeongkyu.lee@navercorp.com>
 Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
 Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> <1187766782@qq.com>
```
|
||||||
Ian Campbell <ian.campbell@docker.com>
|
Ian Campbell <ian.campbell@docker.com>
|
||||||
Ian Campbell <ian.campbell@docker.com> <ijc@docker.com>
|
Ian Campbell <ian.campbell@docker.com> <ijc@docker.com>
|
||||||
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
||||||
Iskander Sharipov <quasilyte@gmail.com>
|
Iskander Sharipov <quasilyte@gmail.com>
|
||||||
|
Ivan Babrou <ibobrik@gmail.com>
|
||||||
Ivan Markin <sw@nogoegst.net> <twim@riseup.net>
|
Ivan Markin <sw@nogoegst.net> <twim@riseup.net>
|
||||||
Jack Laxson <jackjrabbit@gmail.com>
|
Jack Laxson <jackjrabbit@gmail.com>
|
||||||
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
|
Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
|
||||||
|
@ -276,6 +286,7 @@ Jakub Drahos <jdrahos@pulsepoint.com> <jack.drahos@gmail.com>
|
||||||
James Nesbitt <jnesbitt@mirantis.com>
|
James Nesbitt <jnesbitt@mirantis.com>
|
||||||
James Nesbitt <jnesbitt@mirantis.com> <james.nesbitt@wunderkraut.com>
|
James Nesbitt <jnesbitt@mirantis.com> <james.nesbitt@wunderkraut.com>
|
||||||
Jamie Hannaford <jamie@limetree.org> <jamie.hannaford@rackspace.com>
|
Jamie Hannaford <jamie@limetree.org> <jamie.hannaford@rackspace.com>
|
||||||
|
Jan Götte <jaseg@jaseg.net>
|
||||||
Jana Radhakrishnan <mrjana@docker.com>
|
Jana Radhakrishnan <mrjana@docker.com>
|
||||||
Jana Radhakrishnan <mrjana@docker.com> <mrjana@socketplane.io>
|
Jana Radhakrishnan <mrjana@docker.com> <mrjana@socketplane.io>
|
||||||
Javier Bassi <javierbassi@gmail.com>
|
Javier Bassi <javierbassi@gmail.com>
|
||||||
|
@ -315,8 +326,8 @@ John Howard <github@lowenna.com> <10522484+lowenna@users.noreply.github.com>
|
||||||
John Howard <github@lowenna.com> <jhoward@microsoft.com>
|
John Howard <github@lowenna.com> <jhoward@microsoft.com>
|
||||||
John Howard <github@lowenna.com> <jhoward@ntdev.microsoft.com>
|
John Howard <github@lowenna.com> <jhoward@ntdev.microsoft.com>
|
||||||
John Howard <github@lowenna.com> <jhowardmsft@users.noreply.github.com>
|
John Howard <github@lowenna.com> <jhowardmsft@users.noreply.github.com>
|
||||||
John Howard <github@lowenna.com> <John.Howard@microsoft.com>
|
|
||||||
John Howard <github@lowenna.com> <john.howard@microsoft.com>
|
John Howard <github@lowenna.com> <john.howard@microsoft.com>
|
||||||
|
John Howard <github@lowenna.com> <john@lowenna.com>
|
||||||
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
|
John Stephens <johnstep@docker.com> <johnstep@users.noreply.github.com>
|
||||||
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
|
Jon Surrell <jon.surrell@gmail.com> <jon.surrell@automattic.com>
|
||||||
Jonathan Choy <jonathan.j.choy@gmail.com>
|
Jonathan Choy <jonathan.j.choy@gmail.com>
|
||||||
|
@ -466,6 +477,7 @@ Mohit Soni <mosoni@ebay.com> <mohitsoni1989@gmail.com>
|
||||||
Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
|
Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
|
||||||
Moysés Borges <moysesb@gmail.com>
|
Moysés Borges <moysesb@gmail.com>
|
||||||
Moysés Borges <moysesb@gmail.com> <moyses.furtado@wplex.com.br>
|
Moysés Borges <moysesb@gmail.com> <moyses.furtado@wplex.com.br>
|
||||||
|
mrfly <mr.wrfly@gmail.com> <wrfly@users.noreply.github.com>
|
||||||
Nace Oroz <orkica@gmail.com>
|
Nace Oroz <orkica@gmail.com>
|
||||||
Natasha Jarus <linuxmercedes@gmail.com>
|
Natasha Jarus <linuxmercedes@gmail.com>
|
||||||
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
|
Nathan LeClaire <nathan.leclaire@docker.com> <nathan.leclaire@gmail.com>
|
||||||
|
@ -507,6 +519,7 @@ Qiang Huang <h.huangqiang@huawei.com> <qhuang@10.0.2.15>
|
||||||
Qin TianHuan <tianhuan@bingotree.cn>
|
Qin TianHuan <tianhuan@bingotree.cn>
|
||||||
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
|
Ray Tsang <rayt@google.com> <saturnism@users.noreply.github.com>
|
||||||
Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
|
Renaud Gaubert <rgaubert@nvidia.com> <renaud.gaubert@gmail.com>
|
||||||
|
Richard Scothern <richard.scothern@gmail.com>
|
||||||
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
|
Robert Terhaar <rterhaar@atlanticdynamic.com> <robbyt@users.noreply.github.com>
|
||||||
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
|
Roberto G. Hashioka <roberto.hashioka@docker.com> <roberto_hashioka@hotmail.com>
|
||||||
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
|
Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
|
||||||
|
@ -529,11 +542,13 @@ Sandeep Bansal <sabansal@microsoft.com>
|
||||||
Sandeep Bansal <sabansal@microsoft.com> <msabansal@microsoft.com>
|
Sandeep Bansal <sabansal@microsoft.com> <msabansal@microsoft.com>
|
||||||
Santhosh Manohar <santhosh@docker.com>
|
Santhosh Manohar <santhosh@docker.com>
|
||||||
Sargun Dhillon <sargun@netflix.com> <sargun@sargun.me>
|
Sargun Dhillon <sargun@netflix.com> <sargun@sargun.me>
|
||||||
|
Satoshi Tagomori <tagomoris@gmail.com>
|
||||||
Sean Lee <seanlee@tw.ibm.com> <scaleoutsean@users.noreply.github.com>
|
Sean Lee <seanlee@tw.ibm.com> <scaleoutsean@users.noreply.github.com>
|
||||||
Sebastiaan van Stijn <github@gone.nl>
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
Sebastiaan van Stijn <github@gone.nl> <moby@example.com>
|
Sebastiaan van Stijn <github@gone.nl> <moby@example.com>
|
||||||
Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
|
Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
|
||||||
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
||||||
|
Seongyeol Lim <seongyeol37@gmail.com>
|
||||||
Shaun Kaasten <shaunk@gmail.com>
|
Shaun Kaasten <shaunk@gmail.com>
|
||||||
Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
|
Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
|
||||||
Shengbo Song <thomassong@tencent.com>
|
Shengbo Song <thomassong@tencent.com>
|
||||||
|
@ -542,8 +557,6 @@ Shih-Yuan Lee <fourdollars@gmail.com>
|
||||||
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
|
Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
|
||||||
Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
|
Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
|
||||||
Shukui Yang <yangshukui@huawei.com>
|
Shukui Yang <yangshukui@huawei.com>
|
||||||
Shuwei Hao <haosw@cn.ibm.com>
|
|
||||||
Shuwei Hao <haosw@cn.ibm.com> <haoshuwei24@gmail.com>
|
|
||||||
Sidhartha Mani <sidharthamn@gmail.com>
|
Sidhartha Mani <sidharthamn@gmail.com>
|
||||||
Sjoerd Langkemper <sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
|
Sjoerd Langkemper <sjoerd-github@linuxonly.nl> <sjoerd@byte.nl>
|
||||||
Smark Meng <smark@freecoop.net>
|
Smark Meng <smark@freecoop.net>
|
||||||
|
@ -582,6 +595,7 @@ Sylvain Baubeau <lebauce@gmail.com>
|
||||||
Sylvain Baubeau <lebauce@gmail.com> <sbaubeau@redhat.com>
|
Sylvain Baubeau <lebauce@gmail.com> <sbaubeau@redhat.com>
|
||||||
Sylvain Bellemare <sylvain@ascribe.io>
|
Sylvain Bellemare <sylvain@ascribe.io>
|
||||||
Sylvain Bellemare <sylvain@ascribe.io> <sylvain.bellemare@ezeep.com>
|
Sylvain Bellemare <sylvain@ascribe.io> <sylvain.bellemare@ezeep.com>
|
||||||
|
Takuto Sato <tockn.jp@gmail.com>
|
||||||
Tangi Colin <tangicolin@gmail.com>
|
Tangi Colin <tangicolin@gmail.com>
|
||||||
Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
|
Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
|
||||||
Terry Chu <zue.hterry@gmail.com>
|
Terry Chu <zue.hterry@gmail.com>
|
||||||
|
@ -662,6 +676,7 @@ Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||||
Wayne Chang <wayne@neverfear.org>
|
Wayne Chang <wayne@neverfear.org>
|
||||||
Wayne Song <wsong@docker.com> <wsong@users.noreply.github.com>
|
Wayne Song <wsong@docker.com> <wsong@users.noreply.github.com>
|
||||||
Wei Wu <wuwei4455@gmail.com> cizixs <cizixs@163.com>
|
Wei Wu <wuwei4455@gmail.com> cizixs <cizixs@163.com>
|
||||||
|
Wei-Ting Kuo <waitingkuo0527@gmail.com>
|
||||||
Wen Cheng Ma <wenchma@cn.ibm.com>
|
Wen Cheng Ma <wenchma@cn.ibm.com>
|
||||||
Wenjun Tang <tangwj2@lenovo.com> <dodia@163.com>
|
Wenjun Tang <tangwj2@lenovo.com> <dodia@163.com>
|
||||||
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||||
|
@ -696,12 +711,15 @@ Yu Changchun <yuchangchun1@huawei.com>
|
||||||
Yu Chengxia <yuchengxia@huawei.com>
|
Yu Chengxia <yuchengxia@huawei.com>
|
||||||
Yu Peng <yu.peng36@zte.com.cn>
|
Yu Peng <yu.peng36@zte.com.cn>
|
||||||
Yu Peng <yu.peng36@zte.com.cn> <yupeng36@zte.com.cn>
|
Yu Peng <yu.peng36@zte.com.cn> <yupeng36@zte.com.cn>
|
||||||
|
Yuan Sun <sunyuan3@huawei.com>
|
||||||
Yue Zhang <zy675793960@yeah.net>
|
Yue Zhang <zy675793960@yeah.net>
|
||||||
Yufei Xiong <yufei.xiong@qq.com>
|
Yufei Xiong <yufei.xiong@qq.com>
|
||||||
Zach Gershman <zachgersh@gmail.com>
|
Zach Gershman <zachgersh@gmail.com>
|
||||||
Zach Gershman <zachgersh@gmail.com> <zachgersh@users.noreply.github.com>
|
Zach Gershman <zachgersh@gmail.com> <zachgersh@users.noreply.github.com>
|
||||||
Zachary Jaffee <zjaffee@us.ibm.com> <zij@case.edu>
|
Zachary Jaffee <zjaffee@us.ibm.com> <zij@case.edu>
|
||||||
Zachary Jaffee <zjaffee@us.ibm.com> <zjaffee@apache.org>
|
Zachary Jaffee <zjaffee@us.ibm.com> <zjaffee@apache.org>
|
||||||
|
Zhang Kun <zkazure@gmail.com>
|
||||||
|
Zhang Wentao <zhangwentao234@huawei.com>
|
||||||
ZhangHang <stevezhang2014@gmail.com>
|
ZhangHang <stevezhang2014@gmail.com>
|
||||||
Zhenkun Bi <bi.zhenkun@zte.com.cn>
|
Zhenkun Bi <bi.zhenkun@zte.com.cn>
|
||||||
Zhou Hao <zhouhao@cn.fujitsu.com>
|
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||||
|
|
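Each of the .mailmap entries above follows git's standard mapping syntax: the name and address on the left are the canonical identity, and any addresses that follow are commit aliases folded into it. For example, the added line "Hao Shu Wei <haoshuwei24@gmail.com> <haosw@cn.ibm.com>" makes commits authored under the IBM address display under the Gmail identity, which is why the matching AUTHORS entries below collapse from two spellings to one; mailmap-aware tools such as git shortlog -se (used by hack/generate-authors.sh) apply these mappings automatically.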
45 AUTHORS

@@ -1,5 +1,6 @@
-# This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `hack/generate-authors.sh`.
+# File @generated by hack/generate-authors.sh. DO NOT EDIT.
+# This file lists all contributors to the repository.
+# See hack/generate-authors.sh to make modifications.
 
 Aanand Prasad <aanand.prasad@gmail.com>
 Aaron Davidson <aaron@databricks.com>
@@ -9,7 +10,6 @@ Aaron Huslage <huslage@gmail.com>
 Aaron L. Xu <liker.xu@foxmail.com>
 Aaron Lehmann <alehmann@netflix.com>
 Aaron Welch <welch@packet.net>
-Aaron.L.Xu <likexu@harmonycloud.cn>
 Abel Muiño <amuino@gmail.com>
 Abhijeet Kasurde <akasurde@redhat.com>
 Abhinandan Prativadi <aprativadi@gmail.com>
@@ -17,6 +17,7 @@ Abhinav Ajgaonkar <abhinav316@gmail.com>
 Abhishek Chanda <abhishek.becs@gmail.com>
 Abhishek Sharma <abhishek@asharma.me>
 Abin Shahab <ashahab@altiscale.com>
+Abirdcfly <fp544037857@gmail.com>
 Ada Mancini <ada@docker.com>
 Adam Avilla <aavilla@yp.com>
 Adam Dobrawy <naczelnik@jawnosc.tk>
@@ -161,7 +162,6 @@ Andrey Kolomentsev <andrey.kolomentsev@docker.com>
 Andrey Petrov <andrey.petrov@shazow.net>
 Andrey Stolbovsky <andrey.stolbovsky@gmail.com>
 André Martins <aanm90@gmail.com>
-andy <ztao@tibco-support.com>
 Andy Chambers <anchambers@paypal.com>
 andy diller <dillera@gmail.com>
 Andy Goldstein <agoldste@redhat.com>
@@ -170,6 +170,7 @@ Andy Lindeman <alindeman@salesforce.com>
 Andy Rothfusz <github@developersupport.net>
 Andy Smith <github@anarkystic.com>
 Andy Wilson <wilson.andrew.j+github@gmail.com>
+Andy Zhang <andy.zhangtao@hotmail.com>
 Anes Hasicic <anes.hasicic@gmail.com>
 Angel Velazquez <angelcar@amazon.com>
 Anil Belur <askb23@gmail.com>
@@ -209,6 +210,7 @@ Artur Meyster <arthurfbi@yahoo.com>
 Arun Gupta <arun.gupta@gmail.com>
 Asad Saeeduddin <masaeedu@gmail.com>
 Asbjørn Enge <asbjorn@hanafjedle.net>
+Austin Vazquez <macedonv@amazon.com>
 averagehuman <averagehuman@users.noreply.github.com>
 Avi Das <andas222@gmail.com>
 Avi Kivity <avi@scylladb.com>
@@ -222,6 +224,7 @@ Barnaby Gray <barnaby@pickle.me.uk>
 Barry Allard <barry.allard@gmail.com>
 Bartłomiej Piotrowski <b@bpiotrowski.pl>
 Bastiaan Bakker <bbakker@xebia.com>
+Bastien Pascard <bpascard@hotmail.com>
 bdevloed <boris.de.vloed@gmail.com>
 Bearice Ren <bearice@gmail.com>
 Ben Bonnefoy <frenchben@docker.com>
@@ -229,6 +232,7 @@ Ben Firshman <ben@firshman.co.uk>
 Ben Golub <ben.golub@dotcloud.com>
 Ben Gould <ben@bengould.co.uk>
 Ben Hall <ben@benhall.me.uk>
+Ben Langfeld <ben@langfeld.me>
 Ben Sargent <ben@brokendigits.com>
 Ben Severson <BenSeverson@users.noreply.github.com>
 Ben Toews <mastahyeti@gmail.com>
@@ -258,6 +262,7 @@ Bjorn Neergaard <bneergaard@mirantis.com>
 Blake Geno <blakegeno@gmail.com>
 Boaz Shuster <ripcurld.github@gmail.com>
 bobby abbott <ttobbaybbob@gmail.com>
+Bojun Zhu <bojun.zhu@foxmail.com>
 Boqin Qin <bobbqqin@gmail.com>
 Boris Pruessmann <boris@pruessmann.org>
 Boshi Lian <farmer1992@gmail.com>
@@ -339,6 +344,7 @@ Charlie Drage <charlie@charliedrage.com>
 Charlie Lewis <charliel@lab41.org>
 Chase Bolt <chase.bolt@gmail.com>
 ChaYoung You <yousbe@gmail.com>
+Chee Hau Lim <ch33hau@gmail.com>
 Chen Chao <cc272309126@gmail.com>
 Chen Chuanliang <chen.chuanliang@zte.com.cn>
 Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
@@ -545,7 +551,6 @@ Derek Ch <denc716@gmail.com>
 Derek McGowan <derek@mcg.dev>
 Deric Crago <deric.crago@gmail.com>
 Deshi Xiao <dxiao@redhat.com>
-devmeyster <arthurfbi@yahoo.com>
 Devon Estes <devon.estes@klarna.com>
 Devvyn Murphy <devvyn@devvyn.com>
 Dharmit Shah <shahdharmit@gmail.com>
@@ -650,6 +655,7 @@ Erik Dubbelboer <erik@dubbelboer.com>
 Erik Hollensbe <github@hollensbe.org>
 Erik Inge Bolsø <knan@redpill-linpro.com>
 Erik Kristensen <erik@erikkristensen.com>
+Erik Sipsma <erik@sipsma.dev>
 Erik St. Martin <alakriti@gmail.com>
 Erik Weathers <erikdw@gmail.com>
 Erno Hopearuoho <erno.hopearuoho@gmail.com>
@@ -707,6 +713,7 @@ Fengtu Wang <wangfengtu@huawei.com>
 Ferenc Szabo <pragmaticfrank@gmail.com>
 Fernando <fermayo@gmail.com>
 Fero Volar <alian@alian.info>
+Feroz Salam <feroz.salam@sourcegraph.com>
 Ferran Rodenas <frodenas@gmail.com>
 Filipe Brandenburger <filbranden@google.com>
 Filipe Oliveira <contato@fmoliveira.com.br>
@@ -822,7 +829,7 @@ Hamish Hutchings <moredhel@aoeu.me>
 Hannes Ljungberg <hannes@5monkeys.se>
 Hans Kristian Flaatten <hans@starefossen.com>
 Hans Rødtang <hansrodtang@gmail.com>
-Hao Shu Wei <haosw@cn.ibm.com>
+Hao Shu Wei <haoshuwei24@gmail.com>
 Hao Zhang <21521210@zju.edu.cn>
 Harald Albers <github@albersweb.de>
 Harald Niesche <harald@niesche.de>
@@ -861,10 +868,9 @@ Hui Kang <hkang.sunysb@gmail.com>
 Hunter Blanks <hunter@twilio.com>
 huqun <huqun@zju.edu.cn>
 Huu Nguyen <huu@prismskylabs.com>
-hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
+Hyeongkyu Lee <hyeongkyu.lee@navercorp.com>
 Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
 Iago López Galeiras <iago@kinvolk.io>
-Ian Babrou <ibobrik@gmail.com>
 Ian Bishop <ianbishop@pace7.com>
 Ian Bull <irbull@gmail.com>
 Ian Calvert <ianjcalvert@gmail.com>
@@ -881,6 +887,7 @@ Igor Dolzhikov <bluesriverz@gmail.com>
 Igor Karpovich <i.karpovich@currencysolutions.com>
 Iliana Weller <iweller@amazon.com>
 Ilkka Laukkanen <ilkka@ilkka.io>
+Illo Abdulrahim <abdulrahim.illo@nokia.com>
 Ilya Dmitrichenko <errordeveloper@gmail.com>
 Ilya Gusev <mail@igusev.ru>
 Ilya Khlopotov <ilya.khlopotov@gmail.com>
@@ -931,6 +938,7 @@ Jamie Hannaford <jamie@limetree.org>
 Jamshid Afshar <jafshar@yahoo.com>
 Jan Breig <git@pygos.space>
 Jan Chren <dev.rindeal@gmail.com>
+Jan Götte <jaseg@jaseg.net>
 Jan Keromnes <janx@linux.com>
 Jan Koprowski <jan.koprowski@gmail.com>
 Jan Pazdziora <jpazdziora@redhat.com>
@@ -943,7 +951,6 @@ Januar Wayong <januar@gmail.com>
 Jared Biel <jared.biel@bolderthinking.com>
 Jared Hocutt <jaredh@netapp.com>
 Jaroslaw Zabiello <hipertracker@gmail.com>
-jaseg <jaseg@jaseg.net>
 Jasmine Hegman <jasmine@jhegman.com>
 Jason A. Donenfeld <Jason@zx2c4.com>
 Jason Divock <jdivock@gmail.com>
@@ -1214,7 +1221,6 @@ Kris-Mikael Krister <krismikael@protonmail.com>
 Kristian Haugene <kristian.haugene@capgemini.com>
 Kristina Zabunova <triara.xiii@gmail.com>
 Krystian Wojcicki <kwojcicki@sympatico.ca>
-Kun Zhang <zkazure@gmail.com>
 Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
 Kunal Tyagi <tyagi.kunal@live.com>
 Kyle Conroy <kyle.j.conroy@gmail.com>
@@ -1242,7 +1248,6 @@ Leandro Siqueira <leandro.siqueira@gmail.com>
 Lee Calcote <leecalcote@gmail.com>
 Lee Chao <932819864@qq.com>
 Lee, Meng-Han <sunrisedm4@gmail.com>
-leeplay <hyeongkyu.lee@navercorp.com>
 Lei Gong <lgong@alauda.io>
 Lei Jitang <leijitang@huawei.com>
 Leiiwang <u2takey@gmail.com>
@@ -1269,7 +1274,6 @@ Lifubang <lifubang@acmcoder.com>
 Lihua Tang <lhtang@alauda.io>
 Lily Guo <lily.guo@docker.com>
 limeidan <limeidan@loongson.cn>
-limsy <seongyeol37@gmail.com>
 Lin Lu <doraalin@163.com>
 LingFaKe <lingfake@huawei.com>
 Linus Heckemann <lheckemann@twig-world.com>
@@ -1299,6 +1303,7 @@ Lucas Chi <lucas@teacherspayteachers.com>
 Lucas Molas <lmolas@fundacionsadosky.org.ar>
 Lucas Silvestre <lukas.silvestre@gmail.com>
 Luciano Mores <leslau@gmail.com>
+Luis Henrique Mulinari <luis.mulinari@gmail.com>
 Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
 Luiz Svoboda <luizek@gmail.com>
 Lukas Heeren <lukas-heeren@hotmail.com>
@@ -1347,6 +1352,7 @@ Marius Gundersen <me@mariusgundersen.net>
 Marius Sturm <marius@graylog.com>
 Marius Voila <marius.voila@gmail.com>
 Mark Allen <mrallen1@yahoo.com>
+Mark Feit <mfeit@internet2.edu>
 Mark Jeromin <mark.jeromin@sysfrog.net>
 Mark McGranaghan <mmcgrana@gmail.com>
 Mark McKinstry <mmckinst@umich.edu>
@@ -1362,6 +1368,7 @@ Markus Fix <lispmeister@gmail.com>
 Markus Kortlang <hyp3rdino@googlemail.com>
 Martijn Dwars <ikben@martijndwars.nl>
 Martijn van Oosterhout <kleptog@svana.org>
+Martin Braun <braun@neuroforge.de>
 Martin Dojcak <martin.dojcak@lablabs.io>
 Martin Honermeyer <maze@strahlungsfrei.de>
 Martin Kelly <martin@surround.io>
@@ -1678,6 +1685,7 @@ Petr Švihlík <svihlik.petr@gmail.com>
 Petros Angelatos <petrosagg@gmail.com>
 Phil <underscorephil@gmail.com>
 Phil Estes <estesp@gmail.com>
+Phil Sphicas <phil.sphicas@att.com>
 Phil Spitler <pspitler@gmail.com>
 Philip Alexander Etling <paetling@gmail.com>
 Philip Monroe <phil@philmonroe.com>
@@ -1749,7 +1757,6 @@ Ricardo N Feliciano <FelicianoTech@gmail.com>
 Rich Horwood <rjhorwood@apple.com>
 Rich Moyse <rich@moyse.us>
 Rich Seymour <rseymour@gmail.com>
-Richard <richard.scothern@gmail.com>
 Richard Burnison <rburnison@ebay.com>
 Richard Harvey <richard@squarecows.com>
 Richard Mathie <richard.mathie@amey.co.uk>
@@ -1848,7 +1855,6 @@ Ryo Nakao <nakabonne@gmail.com>
 Ryoga Saito <contact@proelbtn.com>
 Rémy Greinhofer <remy.greinhofer@livelovely.com>
 s. rannou <mxs@sbrk.org>
-s00318865 <sunyuan3@huawei.com>
 Sabin Basyal <sabin.basyal@gmail.com>
 Sachin Joshi <sachin_jayant_joshi@hotmail.com>
 Sagar Hani <sagarhani33@gmail.com>
@@ -1938,7 +1944,6 @@ Shourya Sarcar <shourya.sarcar@gmail.com>
 Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
 shuai-z <zs.broccoli@gmail.com>
 Shukui Yang <yangshukui@huawei.com>
-Shuwei Hao <haosw@cn.ibm.com>
 Sian Lerk Lau <kiawin@gmail.com>
 Siarhei Rasiukevich <s_rasiukevich@wargaming.net>
 Sidhartha Mani <sidharthamn@gmail.com>
@@ -1946,7 +1951,6 @@ sidharthamani <sid@rancher.com>
 Silas Sewell <silas@sewell.org>
 Silvan Jegen <s.jegen@gmail.com>
 Simão Reis <smnrsti@gmail.com>
-Simei He <hesimei@zju.edu.cn>
 Simon Barendse <simon.barendse@gmail.com>
 Simon Eskildsen <sirup@sirupsen.com>
 Simon Ferquel <simon.ferquel@docker.com>
@@ -2022,7 +2026,7 @@ Sébastien Stormacq <sebsto@users.noreply.github.com>
 Sören Tempel <soeren+git@soeren-tempel.net>
 Tabakhase <mail@tabakhase.com>
 Tadej Janež <tadej.j@nez.si>
-TAGOMORI Satoshi <tagomoris@gmail.com>
+Takuto Sato <tockn.jp@gmail.com>
 tang0th <tang0th@gmx.com>
 Tangi Colin <tangicolin@gmail.com>
 Tatsuki Sugiura <sugi@nemui.org>
@@ -2035,7 +2039,6 @@ Tejaswini Duggaraju <naduggar@microsoft.com>
 Tejesh Mehta <tejesh.mehta@gmail.com>
 Terry Chu <zue.hterry@gmail.com>
 terryding77 <550147740@qq.com>
-tgic <farmer1992@gmail.com>
 Thatcher Peskens <thatcher@docker.com>
 theadactyl <thea.lamkin@gmail.com>
 Thell 'Bo' Fowler <thell@tbfowler.name>
@@ -2059,6 +2062,7 @@ Thomas Swift <tgs242@gmail.com>
 Thomas Tanaka <thomas.tanaka@oracle.com>
 Thomas Texier <sharkone@en-mousse.org>
 Ti Zhou <tizhou1986@gmail.com>
+Tiago Seabra <tlgs@users.noreply.github.com>
 Tianon Gravi <admwiggin@gmail.com>
 Tianyi Wang <capkurmagati@gmail.com>
 Tibor Vass <teabee89@gmail.com>
@@ -2197,7 +2201,6 @@ VladimirAus <v_roudakov@yahoo.com>
 Vladislav Kolesnikov <vkolesnikov@beget.ru>
 Vlastimil Zeman <vlastimil.zeman@diffblue.com>
 Vojtech Vitek (V-Teq) <vvitek@redhat.com>
-waitingkuo <waitingkuo0527@gmail.com>
 Walter Leibbrandt <github@wrl.co.za>
 Walter Stanish <walter@pratyeka.org>
 Wang Chao <chao.wang@ucloud.cn>
@@ -2227,7 +2230,6 @@ Wendel Fleming <wfleming@usc.edu>
 Wenjun Tang <tangwj2@lenovo.com>
 Wenkai Yin <yinw@vmware.com>
 wenlxie <wenlxie@ebay.com>
-Wentao Zhang <zhangwentao234@huawei.com>
 Wenxuan Zhao <viz@linux.com>
 Wenyu You <21551128@zju.edu.cn>
 Wenzhi Liang <wenzhi.liang@gmail.com>
@@ -2286,6 +2288,7 @@ Yang Bai <hamo.by@gmail.com>
 Yang Li <idealhack@gmail.com>
 Yang Pengfei <yangpengfei4@huawei.com>
 yangchenliang <yangchenliang@huawei.com>
+Yann Autissier <yann.autissier@gmail.com>
 Yanqiang Miao <miao.yanqiang@zte.com.cn>
 Yao Zaiyong <yaozaiyong@hotmail.com>
 Yash Murty <yashmurty@gmail.com>
@@ -2305,6 +2308,7 @@ Yosef Fertel <yfertel@gmail.com>
 You-Sheng Yang (楊有勝) <vicamo@gmail.com>
 youcai <omegacoleman@gmail.com>
 Youcef YEKHLEF <yyekhlef@gmail.com>
+Youfu Zhang <zhangyoufu@gmail.com>
 Yu Changchun <yuchangchun1@huawei.com>
 Yu Chengxia <yuchengxia@huawei.com>
 Yu Peng <yu.peng36@zte.com.cn>
@@ -2357,7 +2361,6 @@ Zou Yu <zouyu7@huawei.com>
 zqh <zqhxuyuan@gmail.com>
 Zuhayr Elahi <zuhayr.elahi@docker.com>
 Zunayed Ali <zunayed@gmail.com>
-Álex González <agonzalezro@gmail.com>
 Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
 Átila Camurça Alves <camurca.home@gmail.com>
 尹吉峰 <jifeng.yin@gmail.com>
3609 CHANGELOG.md (file diff suppressed because it is too large)
40 Dockerfile

@@ -2,7 +2,7 @@
 
 ARG CROSS="false"
 ARG SYSTEMD="false"
-ARG GO_VERSION=1.18.3
+ARG GO_VERSION=1.19.3
 ARG DEBIAN_FRONTEND=noninteractive
 ARG VPNKIT_VERSION=0.5.0
 
@@ -74,6 +74,9 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
     && git checkout -q "$GO_SWAGGER_COMMIT" \
     && go build -o /build/swagger github.com/go-swagger/go-swagger/cmd/swagger
 
+# frozen-images
+# See also frozenImages in "testutil/environment/protect.go" (which needs to
+# be updated when adding images to this list)
 FROM debian:${BASE_DEBIAN_DISTRO} AS frozen-images
 ARG DEBIAN_FRONTEND
 RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
@@ -85,13 +88,13 @@ RUN --mount=type=cache,sharing=locked,id=moby-frozen-images-aptlib,target=/var/lib/apt \
 # Get useful and necessary Hub images so we can "docker load" locally instead of pulling
 COPY contrib/download-frozen-image-v2.sh /
 ARG TARGETARCH
+ARG TARGETVARIANT
 RUN /download-frozen-image-v2.sh /build \
     busybox:latest@sha256:95cf004f559831017cdf4628aaf1bb30133677be8702a8c5f2994629f637a209 \
     busybox:glibc@sha256:1f81263701cddf6402afe9f33fca0266d9fff379e59b1748f33d3072da71ee85 \
     debian:bullseye-slim@sha256:dacf278785a4daa9de07596ec739dbc07131e189942772210709c5c0777e8437 \
     hello-world:latest@sha256:d58e752213a51785838f9eed2b7a498ffa1cb3aa7f946dda11af39286c3db9a9 \
     arm32v7/hello-world:latest@sha256:50b8560ad574c779908da71f7ce370c0a2471c098d44d1c8f6b513c5a55eeeb1
-# See also frozenImages in "testutil/environment/protect.go" (which needs to be updated when adding images to this list)
 
 FROM base AS cross-false
 
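The new ARG TARGETVARIANT works together with the existing ARG TARGETARCH: under BuildKit these are implicit, pre-populated build arguments describing the platform being built for, but a stage only sees them once it declares them, and the download script needs both to select per-platform images such as arm32v7/hello-world (arch "arm", variant "v7"). A minimal, self-contained sketch of the mechanism — the stage name and the echo are illustrative only, not part of the Dockerfile above:

FROM debian:bullseye-slim AS platform-demo
# BuildKit pre-populates TARGETARCH/TARGETVARIANT; these ARG lines merely
# bring the values into this stage's scope.
ARG TARGETARCH
ARG TARGETVARIANT
# For `docker buildx build --platform linux/arm/v7 .` this prints
# "arch=arm variant=v7"; for linux/amd64 the variant is empty.
RUN echo "arch=${TARGETARCH} variant=${TARGETVARIANT}"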
@@ -183,7 +186,7 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
 
 FROM base AS gowinres
 # GOWINRES_VERSION defines go-winres tool version
-ARG GOWINRES_VERSION=v0.2.3
+ARG GOWINRES_VERSION=v0.3.0
 RUN --mount=type=cache,target=/root/.cache/go-build \
     --mount=type=cache,target=/go/pkg/mod \
     GOBIN=/build/ GO111MODULE=on go install "github.com/tc-hib/go-winres@${GOWINRES_VERSION}" \
@@ -202,14 +205,15 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
     PREFIX=/build /install.sh containerd
 
 FROM base AS golangci_lint
-ARG GOLANGCI_LINT_VERSION=v1.46.2
+# FIXME: when updating golangci-lint, remove the temporary "nolint" in https://github.com/moby/moby/blob/7860686a8df15eea9def9e6189c6f9eca031bb6f/libnetwork/networkdb/cluster.go#L246
+ARG GOLANGCI_LINT_VERSION=v1.49.0
 RUN --mount=type=cache,target=/root/.cache/go-build \
     --mount=type=cache,target=/go/pkg/mod \
     GOBIN=/build/ GO111MODULE=on go install "github.com/golangci/golangci-lint/cmd/golangci-lint@${GOLANGCI_LINT_VERSION}" \
     && /build/golangci-lint --version
 
 FROM base AS gotestsum
-ARG GOTESTSUM_VERSION=v1.8.1
+ARG GOTESTSUM_VERSION=v1.8.2
 RUN --mount=type=cache,target=/root/.cache/go-build \
     --mount=type=cache,target=/go/pkg/mod \
     GOBIN=/build/ GO111MODULE=on go install "gotest.tools/gotestsum@${GOTESTSUM_VERSION}" \
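The go-winres, golangci-lint, and gotestsum stages above all follow one pattern: each tool lives in its own build stage, its version is pinned in a single ARG so an upgrade is a one-line diff, and the go-build and module caches are mounted so rebuilds stay fast. A minimal sketch of that pattern, using a hypothetical module path (example.com/some/tool and SOME_TOOL_VERSION are placeholders, not real moby build args):

FROM base AS some-tool
# SOME_TOOL_VERSION pins the tool; it can be overridden per build, e.g.
# `docker build --build-arg SOME_TOOL_VERSION=v1.1.0 .`, without editing this file.
ARG SOME_TOOL_VERSION=v1.0.0
RUN --mount=type=cache,target=/root/.cache/go-build \
    --mount=type=cache,target=/go/pkg/mod \
    GOBIN=/build/ GO111MODULE=on go install "example.com/some/tool@${SOME_TOOL_VERSION}" \
    && /build/some-tool --version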
@@ -288,13 +292,18 @@ RUN --mount=type=tmpfs,target=/tmp/crun-build \
     ./configure --bindir=/build && \
     make -j install
 
-FROM --platform=amd64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-amd64
-FROM --platform=arm64 djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-arm64
-FROM scratch AS vpnkit
-COPY --from=vpnkit-amd64 /vpnkit /build/vpnkit.x86_64
-COPY --from=vpnkit-arm64 /vpnkit /build/vpnkit.aarch64
+# vpnkit
+# use dummy scratch stage to avoid build to fail for unsupported platforms
+FROM scratch AS vpnkit-windows
+FROM scratch AS vpnkit-linux-386
+FROM scratch AS vpnkit-linux-arm
+FROM scratch AS vpnkit-linux-ppc64le
+FROM scratch AS vpnkit-linux-riscv64
+FROM scratch AS vpnkit-linux-s390x
+FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-amd64
+FROM djs55/vpnkit:${VPNKIT_VERSION} AS vpnkit-linux-arm64
+FROM vpnkit-linux-${TARGETARCH} AS vpnkit-linux
+FROM vpnkit-${TARGETOS} AS vpnkit
 
 # TODO: Some of this is only really needed for testing, it would be nice to split this up
 FROM runtime-dev AS dev-systemd-false
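The rewritten vpnkit block replaces the two hard-coded --platform stages with one alias stage per target platform: the platforms for which djs55/vpnkit publishes a binary point at the real image, every other platform points at an empty scratch stage, and the final two FROM lines select among them using BuildKit's implicit TARGETOS/TARGETARCH arguments (which, unlike inside a stage, may be referenced in FROM without a prior ARG). Since copying / out of an empty scratch stage copies nothing, the later `COPY --from=vpnkit / /usr/local/bin/` lines further down become a safe no-op on unsupported platforms instead of failing the build. A condensed sketch of the same fallback pattern with a hypothetical image (example/tool is a placeholder that only publishes an amd64 binary):

FROM scratch AS tool-windows
FROM scratch AS tool-linux-arm64
FROM example/tool:1.0 AS tool-linux-amd64
# Implicit BuildKit platform args pick the matching stage per target.
FROM tool-linux-${TARGETARCH} AS tool-linux
FROM tool-${TARGETOS} AS tool
# Later: COPY --from=tool / /usr/local/bin/  (a no-op when "tool" is empty)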
@@ -349,7 +358,8 @@ RUN update-alternatives --set iptables /usr/sbin/iptables-legacy || true \
     && update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy || true \
     && update-alternatives --set arptables /usr/sbin/arptables-legacy || true
 
-RUN pip3 install yamllint==1.26.1
+ARG YAMLLINT_VERSION=1.27.1
+RUN pip3 install yamllint==${YAMLLINT_VERSION}
 
 COPY --from=dockercli /build/ /usr/local/cli
 COPY --from=frozen-images /build/ /docker-frozen-images
@@ -366,7 +376,7 @@ COPY --from=shfmt /build/ /usr/local/bin/
 COPY --from=runc /build/ /usr/local/bin/
 COPY --from=containerd /build/ /usr/local/bin/
 COPY --from=rootlesskit /build/ /usr/local/bin/
-COPY --from=vpnkit /build/ /usr/local/bin/
+COPY --from=vpnkit / /usr/local/bin/
 COPY --from=crun /build/ /usr/local/bin/
 COPY hack/dockerfile/etc/docker/ /etc/docker/
 ENV PATH=/usr/local/cli:$PATH
@@ -416,7 +426,7 @@ COPY --from=tini /build/ /usr/local/bin/
 COPY --from=runc /build/ /usr/local/bin/
 COPY --from=containerd /build/ /usr/local/bin/
 COPY --from=rootlesskit /build/ /usr/local/bin/
-COPY --from=vpnkit /build/ /usr/local/bin/
+COPY --from=vpnkit / /usr/local/bin/
 COPY --from=gowinres /build/ /usr/local/bin/
 WORKDIR /go/src/github.com/docker/docker
@@ -1,4 +1,4 @@
-ARG GO_VERSION=1.18.3
+ARG GO_VERSION=1.19.3
 
 FROM golang:${GO_VERSION}-alpine AS base
 ENV GO111MODULE=off
@@ -5,7 +5,7 @@
 
 # This represents the bare minimum required to build and test Docker.
 
-ARG GO_VERSION=1.18.3
+ARG GO_VERSION=1.19.3
 
 ARG BASE_DEBIAN_DISTRO="bullseye"
 ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
@@ -155,7 +155,7 @@
 # The number of build steps below are explicitly minimised to improve performance.
 
 # Extremely important - do not change the following line to reference a "specific" image,
-# such as `mcr.microsoft.com/windows/servercore:ltsc2019`. If using this Dockerfile in process
+# such as `mcr.microsoft.com/windows/servercore:ltsc2022`. If using this Dockerfile in process
 # isolated containers, the kernel of the host must match the container image, and hence
 # would fail between Windows Server 2016 (aka RS1) and Windows Server 2019 (aka RS5).
 # It is expected that the image `microsoft/windowsservercore:latest` is present, and matches
@@ -165,10 +165,10 @@ FROM microsoft/windowsservercore
 # Use PowerShell as the default shell
 SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
 
-ARG GO_VERSION=1.18.3
-ARG CONTAINERD_VERSION=v1.6.6
-ARG GOTESTSUM_VERSION=v1.8.1
-ARG GOWINRES_VERSION=v0.2.3
+ARG GO_VERSION=1.19.3
+ARG GOTESTSUM_VERSION=v1.8.2
+ARG GOWINRES_VERSION=v0.3.0
+ARG CONTAINERD_VERSION=v1.6.10
 
 # Environment variable notes:
 # - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
513 Jenkinsfile (vendored)
@@ -8,11 +8,6 @@ pipeline {
         timestamps()
     }
     parameters {
-        booleanParam(name: 'unit_validate', defaultValue: true, description: 'amd64 (x86_64) unit tests and vendor check')
-        booleanParam(name: 'validate_force', defaultValue: false, description: 'force validation steps to be run, even if no changes were detected')
-        booleanParam(name: 'amd64', defaultValue: true, description: 'amd64 (x86_64) Build/Test')
-        booleanParam(name: 'rootless', defaultValue: true, description: 'amd64 (x86_64) Build/Test (Rootless mode)')
-        booleanParam(name: 'cgroup2', defaultValue: true, description: 'amd64 (x86_64) Build/Test (cgroup v2)')
         booleanParam(name: 'arm64', defaultValue: true, description: 'ARM (arm64) Build/Test')
         booleanParam(name: 's390x', defaultValue: false, description: 'IBM Z (s390x) Build/Test')
         booleanParam(name: 'ppc64le', defaultValue: false, description: 'PowerPC (ppc64le) Build/Test')
@@ -44,7 +39,7 @@ pipeline {
                 beforeAgent true
                 expression { params.dco }
             }
-            agent { label 'amd64 && ubuntu-1804 && overlay2' }
+            agent { label 'arm64 && ubuntu-2004' }
             steps {
                 sh '''
                 docker run --rm \
@@ -57,494 +52,6 @@ pipeline {
         }
         stage('Build') {
             parallel {
-                stage('unit-validate') {
-                    when {
-                        beforeAgent true
-                        expression { params.unit_validate }
-                    }
-                    agent { label 'amd64 && ubuntu-1804 && overlay2' }
-                    environment {
-                        // On master ("non-pull-request"), force running some validation checks (vendor, swagger),
-                        // even if no files were changed. This allows catching problems caused by pull-requests
-                        // that were merged out-of-sequence.
-                        TEST_FORCE_VALIDATE = sh returnStdout: true, script: 'if [ "${BRANCH_NAME%%-*}" != "PR" ] || [ "${CHANGE_TARGET:-master}" != "master" ] || [ "${validate_force}" = "true" ]; then echo "1"; fi'
-                    }
-
-                    stages {
-                        stage("Print info") {
-                            steps {
-                                sh 'docker version'
-                                sh 'docker info'
-                                sh '''
-                                echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
-                                curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
-                                && bash ${WORKSPACE}/check-config.sh || true
-                                '''
-                            }
-                        }
-                        stage("Build dev image") {
-                            steps {
-                                sh 'docker build --force-rm --build-arg APT_MIRROR --build-arg CROSS=true -t docker:${GIT_COMMIT} .'
-                            }
-                        }
-                        stage("Validate") {
-                            steps {
-                                sh '''
-                                docker run --rm -t --privileged \
-                                    -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-                                    -v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
-                                    --name docker-pr$BUILD_NUMBER \
-                                    -e DOCKER_EXPERIMENTAL \
-                                    -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-                                    -e DOCKER_GRAPHDRIVER \
-                                    -e TEST_FORCE_VALIDATE \
-                                    -e VALIDATE_REPO=${GIT_URL} \
-                                    -e VALIDATE_BRANCH=${CHANGE_TARGET} \
-                                    docker:${GIT_COMMIT} \
-                                    hack/validate/default
-                                '''
-                            }
-                        }
-                        stage("Docker-py") {
-                            steps {
-                                sh '''
-                                docker run --rm -t --privileged \
-                                    -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-                                    --name docker-pr$BUILD_NUMBER \
-                                    -e DOCKER_EXPERIMENTAL \
-                                    -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-                                    -e DOCKER_GRAPHDRIVER \
-                                    -e VALIDATE_REPO=${GIT_URL} \
-                                    -e VALIDATE_BRANCH=${CHANGE_TARGET} \
-                                    docker:${GIT_COMMIT} \
-                                    hack/make.sh \
-                                    dynbinary \
-                                    test-docker-py
-                                '''
-                            }
-                            post {
-                                always {
-                                    junit testResults: 'bundles/test-docker-py/junit-report.xml', allowEmptyResults: true
-
-                                    sh '''
-                                    echo "Ensuring container killed."
-                                    docker rm -vf docker-pr$BUILD_NUMBER || true
-                                    '''
-
-                                    sh '''
-                                    echo 'Chowning /workspace to jenkins user'
-                                    docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
-                                    '''
-
-                                    catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
-                                        sh '''
-                                        bundleName=docker-py
-                                        echo "Creating ${bundleName}-bundles.tar.gz"
-                                        tar -czf ${bundleName}-bundles.tar.gz bundles/test-docker-py/*.xml bundles/test-docker-py/*.log
-                                        '''
-
-                                        archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
-                                    }
-                                }
-                            }
-                        }
-                        stage("Static") {
-                            steps {
-                                sh '''
-                                docker run --rm -t --privileged \
-                                    -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-                                    --name docker-pr$BUILD_NUMBER \
-                                    -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-                                    -e DOCKER_GRAPHDRIVER \
-                                    docker:${GIT_COMMIT} \
-                                    hack/make.sh binary
-                                '''
-                            }
-                        }
-                        stage("Cross") {
-                            steps {
-                                sh '''
-                                docker run --rm -t --privileged \
-                                    -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-                                    --name docker-pr$BUILD_NUMBER \
-                                    -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-                                    -e DOCKER_GRAPHDRIVER \
-                                    docker:${GIT_COMMIT} \
-                                    hack/make.sh cross
-                                '''
-                            }
-                        }
-                        // needs to be last stage that calls make.sh for the junit report to work
-                        stage("Unit tests") {
-                            steps {
-                                sh '''
-                                sudo modprobe ip6table_filter
-                                '''
-                                sh '''
-                                docker run --rm -t --privileged \
-                                    -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-                                    --name docker-pr$BUILD_NUMBER \
-                                    -e DOCKER_EXPERIMENTAL \
-                                    -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-                                    -e DOCKER_GRAPHDRIVER \
-                                    -e VALIDATE_REPO=${GIT_URL} \
-                                    -e VALIDATE_BRANCH=${CHANGE_TARGET} \
-                                    docker:${GIT_COMMIT} \
-                                    hack/test/unit
-                                '''
-                            }
-                            post {
-                                always {
-                                    junit testResults: 'bundles/junit-report*.xml', allowEmptyResults: true
-                                }
-                            }
-                        }
-                        stage("Validate vendor") {
-                            steps {
-                                sh '''
-                                docker run --rm -t --privileged \
-                                    -v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
-                                    --name docker-pr$BUILD_NUMBER \
-                                    -e DOCKER_EXPERIMENTAL \
-                                    -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-                                    -e DOCKER_GRAPHDRIVER \
-                                    -e TEST_FORCE_VALIDATE \
-                                    -e VALIDATE_REPO=${GIT_URL} \
-                                    -e VALIDATE_BRANCH=${CHANGE_TARGET} \
-                                    docker:${GIT_COMMIT} \
-                                    hack/validate/vendor
-                                '''
-                            }
-                        }
-                    }
-
-                    post {
-                        always {
-                            sh '''
-                            echo 'Ensuring container killed.'
-                            docker rm -vf docker-pr$BUILD_NUMBER || true
-                            '''
-
-                            sh '''
-                            echo 'Chowning /workspace to jenkins user'
-                            docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
-                            '''
-
-                            catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
-                                sh '''
-                                bundleName=unit
-                                echo "Creating ${bundleName}-bundles.tar.gz"
-                                tar -czvf ${bundleName}-bundles.tar.gz bundles/junit-report*.xml bundles/go-test-report*.json bundles/profile*.out
-                                '''
-
-                                archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
-                            }
-                        }
-                        cleanup {
-                            sh 'make clean'
-                            deleteDir()
-                        }
-                    }
-                }
-                stage('amd64') {
-                    when {
-                        beforeAgent true
-                        expression { params.amd64 }
-                    }
-                    agent { label 'amd64 && ubuntu-1804 && overlay2' }
-
-                    stages {
-                        stage("Print info") {
-                            steps {
-                                sh 'docker version'
-                                sh 'docker info'
-                                sh '''
-                                echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
-                                curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
-                                && bash ${WORKSPACE}/check-config.sh || true
-                                '''
-                            }
-                        }
-                        stage("Build dev image") {
-                            steps {
-                                sh '''
-                                # todo: include ip_vs in base image
-                                sudo modprobe ip_vs
-
-                                docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
-                                '''
-                            }
-                        }
-                        stage("Run tests") {
-                            steps {
-                                sh '''#!/bin/bash
-                                # bash is needed so 'jobs -p' works properly
-                                # it also accepts setting inline envvars for functions without explicitly exporting
-                                set -x
-
-                                run_tests() {
-                                    [ -n "$TESTDEBUG" ] && rm= || rm=--rm;
-                                    docker run $rm -t --privileged \
-                                        -v "$WORKSPACE/bundles/${TEST_INTEGRATION_DEST}:/go/src/github.com/docker/docker/bundles" \
-                                        -v "$WORKSPACE/bundles/dynbinary-daemon:/go/src/github.com/docker/docker/bundles/dynbinary-daemon" \
-                                        -v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
-                                        --name "$CONTAINER_NAME" \
-                                        -e KEEPBUNDLE=1 \
-                                        -e TESTDEBUG \
-                                        -e TESTFLAGS \
-                                        -e TEST_SKIP_INTEGRATION \
-                                        -e TEST_SKIP_INTEGRATION_CLI \
-                                        -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-                                        -e DOCKER_GRAPHDRIVER \
-                                        -e TIMEOUT \
-                                        -e VALIDATE_REPO=${GIT_URL} \
-                                        -e VALIDATE_BRANCH=${CHANGE_TARGET} \
-                                        docker:${GIT_COMMIT} \
-                                        hack/make.sh \
-                                        "$1" \
-                                        test-integration
-                                }
-
-                                trap "exit" INT TERM
-                                trap 'pids=$(jobs -p); echo "Remaining pids to kill: [$pids]"; [ -z "$pids" ] || kill $pids' EXIT
-
-                                CONTAINER_NAME=docker-pr$BUILD_NUMBER
-
-                                docker run --rm -t --privileged \
-                                    -v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
-                                    -v "$WORKSPACE/.git:/go/src/github.com/docker/docker/.git" \
-                                    --name ${CONTAINER_NAME}-build \
-                                    -e DOCKER_EXPERIMENTAL \
-                                    -e DOCKER_GITCOMMIT=${GIT_COMMIT} \
-                                    -e DOCKER_GRAPHDRIVER \
-                                    docker:${GIT_COMMIT} \
-                                    hack/make.sh \
-                                    dynbinary
-
-                                # flaky + integration
-                                TEST_INTEGRATION_DEST=1 CONTAINER_NAME=${CONTAINER_NAME}-1 TEST_SKIP_INTEGRATION_CLI=1 run_tests test-integration-flaky &
-
-                                # integration-cli first set
-                                TEST_INTEGRATION_DEST=2 CONTAINER_NAME=${CONTAINER_NAME}-2 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSuite|DockerNetworkSuite|DockerHubPullSuite|DockerRegistrySuite|DockerSchema1RegistrySuite|DockerRegistryAuthTokenSuite|DockerRegistryAuthHtpasswdSuite)/" run_tests &
|
|
||||||
|
|
||||||
# integration-cli second set
|
|
||||||
TEST_INTEGRATION_DEST=3 CONTAINER_NAME=${CONTAINER_NAME}-3 TEST_SKIP_INTEGRATION=1 TESTFLAGS="-test.run Test(DockerSwarmSuite|DockerDaemonSuite|DockerExternalVolumeSuite)/" run_tests &
|
|
||||||
|
|
||||||
c=0
|
|
||||||
for job in $(jobs -p); do
|
|
||||||
wait ${job} || c=$?
|
|
||||||
done
|
|
||||||
exit $c
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
post {
|
|
||||||
always {
|
|
||||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
post {
|
|
||||||
always {
|
|
||||||
sh '''
|
|
||||||
echo "Ensuring container killed."
|
|
||||||
cids=$(docker ps -aq -f name=docker-pr${BUILD_NUMBER}-*)
|
|
||||||
[ -n "$cids" ] && docker rm -vf $cids || true
|
|
||||||
'''
|
|
||||||
|
|
||||||
sh '''
|
|
||||||
echo "Chowning /workspace to jenkins user"
|
|
||||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
|
||||||
'''
|
|
||||||
|
|
||||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
|
||||||
sh '''
|
|
||||||
bundleName=amd64
|
|
||||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
|
||||||
# exclude overlay2 directories
|
|
||||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
|
||||||
'''
|
|
||||||
|
|
||||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cleanup {
|
|
||||||
sh 'make clean'
|
|
||||||
deleteDir()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage('rootless') {
|
|
||||||
when {
|
|
||||||
beforeAgent true
|
|
||||||
expression { params.rootless }
|
|
||||||
}
|
|
||||||
agent { label 'amd64 && ubuntu-1804 && overlay2' }
|
|
||||||
stages {
|
|
||||||
stage("Print info") {
|
|
||||||
steps {
|
|
||||||
sh 'docker version'
|
|
||||||
sh 'docker info'
|
|
||||||
sh '''
|
|
||||||
echo "check-config.sh version: ${CHECK_CONFIG_COMMIT}"
|
|
||||||
curl -fsSL -o ${WORKSPACE}/check-config.sh "https://raw.githubusercontent.com/moby/moby/${CHECK_CONFIG_COMMIT}/contrib/check-config.sh" \
|
|
||||||
&& bash ${WORKSPACE}/check-config.sh || true
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage("Build dev image") {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
docker build --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage("Integration tests") {
|
|
||||||
environment {
|
|
||||||
DOCKER_ROOTLESS = '1'
|
|
||||||
TEST_SKIP_INTEGRATION_CLI = '1'
|
|
||||||
}
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
docker run --rm -t --privileged \
|
|
||||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
|
||||||
--name docker-pr$BUILD_NUMBER \
|
|
||||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
|
||||||
-e DOCKER_GRAPHDRIVER \
|
|
||||||
-e DOCKER_EXPERIMENTAL \
|
|
||||||
-e DOCKER_ROOTLESS \
|
|
||||||
-e TEST_SKIP_INTEGRATION_CLI \
|
|
||||||
-e TIMEOUT \
|
|
||||||
-e VALIDATE_REPO=${GIT_URL} \
|
|
||||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
|
||||||
docker:${GIT_COMMIT} \
|
|
||||||
hack/make.sh \
|
|
||||||
dynbinary \
|
|
||||||
test-integration
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
post {
|
|
||||||
always {
|
|
||||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
post {
|
|
||||||
always {
|
|
||||||
sh '''
|
|
||||||
echo "Ensuring container killed."
|
|
||||||
docker rm -vf docker-pr$BUILD_NUMBER || true
|
|
||||||
'''
|
|
||||||
|
|
||||||
sh '''
|
|
||||||
echo "Chowning /workspace to jenkins user"
|
|
||||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
|
||||||
'''
|
|
||||||
|
|
||||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
|
||||||
sh '''
|
|
||||||
bundleName=amd64-rootless
|
|
||||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
|
||||||
# exclude overlay2 directories
|
|
||||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
|
||||||
'''
|
|
||||||
|
|
||||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cleanup {
|
|
||||||
sh 'make clean'
|
|
||||||
deleteDir()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
stage('cgroup2') {
|
|
||||||
when {
|
|
||||||
beforeAgent true
|
|
||||||
expression { params.cgroup2 }
|
|
||||||
}
|
|
||||||
agent { label 'amd64 && ubuntu-2004 && cgroup2' }
|
|
||||||
stages {
|
|
||||||
stage("Print info") {
|
|
||||||
steps {
|
|
||||||
sh 'docker version'
|
|
||||||
sh 'docker info'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage("Build dev image") {
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
docker build --force-rm --build-arg APT_MIRROR --build-arg SYSTEMD=true -t docker:${GIT_COMMIT} .
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stage("Integration tests") {
|
|
||||||
environment {
|
|
||||||
DOCKER_SYSTEMD = '1' // recommended cgroup driver for v2
|
|
||||||
TEST_SKIP_INTEGRATION_CLI = '1' // CLI tests do not support v2
|
|
||||||
}
|
|
||||||
steps {
|
|
||||||
sh '''
|
|
||||||
docker run --rm -t --privileged \
|
|
||||||
-v "$WORKSPACE/bundles:/go/src/github.com/docker/docker/bundles" \
|
|
||||||
--name docker-pr$BUILD_NUMBER \
|
|
||||||
-e DOCKER_GITCOMMIT=${GIT_COMMIT} \
|
|
||||||
-e DOCKER_GRAPHDRIVER \
|
|
||||||
-e DOCKER_EXPERIMENTAL \
|
|
||||||
-e DOCKER_SYSTEMD \
|
|
||||||
-e TEST_SKIP_INTEGRATION_CLI \
|
|
||||||
-e TIMEOUT \
|
|
||||||
-e VALIDATE_REPO=${GIT_URL} \
|
|
||||||
-e VALIDATE_BRANCH=${CHANGE_TARGET} \
|
|
||||||
docker:${GIT_COMMIT} \
|
|
||||||
hack/make.sh \
|
|
||||||
dynbinary \
|
|
||||||
test-integration
|
|
||||||
'''
|
|
||||||
}
|
|
||||||
post {
|
|
||||||
always {
|
|
||||||
junit testResults: 'bundles/**/*-report.xml', allowEmptyResults: true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
post {
|
|
||||||
always {
|
|
||||||
sh '''
|
|
||||||
echo "Ensuring container killed."
|
|
||||||
docker rm -vf docker-pr$BUILD_NUMBER || true
|
|
||||||
'''
|
|
||||||
|
|
||||||
sh '''
|
|
||||||
echo "Chowning /workspace to jenkins user"
|
|
||||||
docker run --rm -v "$WORKSPACE:/workspace" busybox chown -R "$(id -u):$(id -g)" /workspace
|
|
||||||
'''
|
|
||||||
|
|
||||||
catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE', message: 'Failed to create bundles.tar.gz') {
|
|
||||||
sh '''
|
|
||||||
bundleName=amd64-cgroup2
|
|
||||||
echo "Creating ${bundleName}-bundles.tar.gz"
|
|
||||||
# exclude overlay2 directories
|
|
||||||
find bundles -path '*/root/*overlay2' -prune -o -type f \\( -name '*-report.json' -o -name '*.log' -o -name '*.prof' -o -name '*-report.xml' \\) -print | xargs tar -czf ${bundleName}-bundles.tar.gz
|
|
||||||
'''
|
|
||||||
|
|
||||||
archiveArtifacts artifacts: '*-bundles.tar.gz', allowEmptyArchive: true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cleanup {
|
|
||||||
sh 'make clean'
|
|
||||||
deleteDir()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
 stage('s390x') {
     when {
         beforeAgent true
@@ -753,12 +260,6 @@ pipeline {
         }
     }
     agent { label 'ppc64le-ubuntu-1604' }
-    // ppc64le machines run on Docker 18.06, and buildkit has some
-    // bugs on that version. Build and use buildx instead.
-    environment {
-        USE_BUILDX = '1'
-        DOCKER_BUILDKIT = '0'
-    }
-
     stages {
         stage("Print info") {
@@ -775,8 +276,7 @@ pipeline {
         stage("Build dev image") {
             steps {
                 sh '''
-                    make bundles/buildx
-                    bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
+                    docker buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
                 '''
             }
         }
@@ -872,12 +372,6 @@ pipeline {
         }
     }
     agent { label 'ppc64le-ubuntu-1604' }
-    // ppc64le machines run on Docker 18.06, and buildkit has some
-    // bugs on that version. Build and use buildx instead.
-    environment {
-        USE_BUILDX = '1'
-        DOCKER_BUILDKIT = '0'
-    }
-
     stages {
         stage("Print info") {
@@ -894,8 +388,7 @@ pipeline {
         stage("Build dev image") {
             steps {
                 sh '''
-                    make bundles/buildx
-                    bundles/buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
+                    docker buildx build --load --force-rm --build-arg APT_MIRROR -t docker:${GIT_COMMIT} .
                 '''
             }
         }

Makefile (62 changes)

@@ -1,21 +1,7 @@
-.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate win
+.PHONY: all binary dynbinary build cross help install manpages run shell test test-docker-py test-integration test-unit validate validate-% win
 
-BUILDX_VERSION ?= v0.8.2
-
-ifdef USE_BUILDX
-BUILDX ?= $(shell command -v buildx)
-BUILDX ?= $(shell command -v docker-buildx)
-DOCKER_BUILDX_CLI_PLUGIN_PATH ?= ~/.docker/cli-plugins/docker-buildx
-BUILDX ?= $(shell if [ -x "$(DOCKER_BUILDX_CLI_PLUGIN_PATH)" ]; then echo $(DOCKER_BUILDX_CLI_PLUGIN_PATH); fi)
-endif
-
-ifndef USE_BUILDX
-DOCKER_BUILDKIT := 1
-export DOCKER_BUILDKIT
-endif
-
-BUILDX ?= bundles/buildx
 DOCKER ?= docker
+BUILDX ?= $(DOCKER) buildx
 
 # set the graph driver as the current graphdriver if not set
 DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
@@ -69,10 +55,12 @@ DOCKER_ENVS := \
 	-e DOCKER_USERLANDPROXY \
 	-e DOCKERD_ARGS \
 	-e DELVE_PORT \
+	-e GITHUB_ACTIONS \
 	-e TEST_FORCE_VALIDATE \
 	-e TEST_INTEGRATION_DIR \
 	-e TEST_SKIP_INTEGRATION \
 	-e TEST_SKIP_INTEGRATION_CLI \
+	-e TESTCOVERAGE \
 	-e TESTDEBUG \
 	-e TESTDIRS \
 	-e TESTFLAGS \
@@ -118,7 +106,7 @@ DOCKER_IMAGE := docker-dev
 DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",)
 DELVE_PORT_FORWARD := $(if $(DELVE_PORT),-p "$(DELVE_PORT)",)
 
-DOCKER_FLAGS := $(DOCKER) run --rm -i --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) $(DELVE_PORT_FORWARD)
+DOCKER_FLAGS := $(DOCKER) run --rm --privileged $(DOCKER_CONTAINER_NAME) $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) $(DELVE_PORT_FORWARD)
 BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR))
 export BUILD_APT_MIRROR
 
@@ -137,6 +125,14 @@ ifeq ($(INTERACTIVE), 1)
 	DOCKER_FLAGS += -t
 endif
 
+# on GitHub Runners input device is not a TTY but we allocate a pseudo-one,
+# otherwise keep STDIN open even if not attached if not a GitHub Runner.
+ifeq ($(GITHUB_ACTIONS),true)
+	DOCKER_FLAGS += -t
+else
+	DOCKER_FLAGS += -i
+endif
+
 DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
 
 DOCKER_BUILD_ARGS += --build-arg=GO_VERSION
@@ -145,12 +141,7 @@ DOCKER_BUILD_ARGS += --build-arg=SYSTEMD=true
 endif
 
 BUILD_OPTS := ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} ${DOCKER_BUILD_OPTS} -f "$(DOCKERFILE)"
-ifdef USE_BUILDX
-BUILD_OPTS += $(BUILDX_BUILD_EXTRA_OPTS)
 BUILD_CMD := $(BUILDX) build
-else
-BUILD_CMD := $(DOCKER) build
-endif
 
 # This is used for the legacy "build" target and anything still depending on it
 BUILD_CROSS =
@@ -168,14 +159,14 @@ default: binary
 all: build ## validate all checks, build linux binaries, run all tests,\ncross build non-linux binaries, and generate archives
 	$(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh'
 
-binary: buildx ## build statically linked linux binaries
+binary: bundles ## build statically linked linux binaries
 	$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .
 
-dynbinary: buildx ## build dynamically linked linux binaries
+dynbinary: bundles ## build dynamically linked linux binaries
 	$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .
 
 cross: BUILD_OPTS += --build-arg CROSS=true --build-arg DOCKER_CROSSPLATFORMS
-cross: buildx ## cross build the binaries for darwin, freebsd and\nwindows
+cross: bundles ## cross build the binaries for darwin, freebsd and\nwindows
 	$(BUILD_CMD) $(BUILD_OPTS) --output=bundles/ --target=$@ $(VERSION_AUTOGEN_ARGS) .
 
 bundles:
@@ -203,11 +194,8 @@ build: shell_target := --target=dev
 else
 build: shell_target := --target=final
 endif
-ifdef USE_BUILDX
-build: buildx_load := --load
-endif
-build: buildx
-	$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) $(buildx_load) $(BUILD_CROSS) -t "$(DOCKER_IMAGE)" .
+build: bundles
+	$(BUILD_CMD) $(BUILD_OPTS) $(shell_target) --load $(BUILD_CROSS) -t "$(DOCKER_IMAGE)" .
 
 shell: build ## start a shell inside the build env
 	$(DOCKER_RUN_DOCKER) bash
@@ -237,6 +225,9 @@ test-unit: build ## run the unit tests
 validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
 	$(DOCKER_RUN_DOCKER) hack/validate/all
 
+validate-%: build ## validate specific check
+	$(DOCKER_RUN_DOCKER) hack/validate/$*
+
 win: build ## cross build the binary for windows
 	$(DOCKER_RUN_DOCKER) DOCKER_CROSSPLATFORMS=windows/amd64 hack/make.sh cross
 
@@ -255,14 +246,3 @@ swagger-docs: ## preview the API documentation
 		-e 'REDOC_OPTIONS=hide-hostname="true" lazy-rendering' \
 		-p $(SWAGGER_DOCS_PORT):80 \
 		bfirsh/redoc:1.14.0
-
-.PHONY: buildx
-ifdef USE_BUILDX
-ifeq ($(BUILDX), bundles/buildx)
-buildx: bundles/buildx ## build buildx cli tool
-endif
-endif
-
-bundles/buildx: bundles ## build buildx CLI tool
-	curl -fsSL https://raw.githubusercontent.com/moby/buildkit/70deac12b5857a1aa4da65e90b262368e2f71500/hack/install-buildx | VERSION="$(BUILDX_VERSION)" BINDIR="$(@D)" bash
-	$@ version

@@ -61,5 +61,4 @@ func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.
 		ctx = context.WithValue(ctx, httputils.APIVersionKey{}, apiVersion)
 		return handler(ctx, w, r, vars)
 	}
-
 }

@@ -1,6 +1,8 @@
 package build // import "github.com/docker/docker/api/server/router/build"
 
 import (
+	"runtime"
+
 	"github.com/docker/docker/api/server/router"
 	"github.com/docker/docker/api/types"
 )
@@ -37,17 +39,24 @@ func (r *buildRouter) initRoutes() {
 	}
 }
 
-// BuilderVersion derives the default docker builder version from the config
-// Note: it is valid to have BuilderVersion unset which means it is up to the
-// client to choose which builder to use.
+// BuilderVersion derives the default docker builder version from the config.
+//
+// The default on Linux is version "2" (BuildKit), but the daemon can be
+// configured to recommend version "1" (classic Builder). Windows does not
+// yet support BuildKit for native Windows images, and uses "1" (classic builder)
+// as a default.
+//
+// This value is only a recommendation as advertised by the daemon, and it is
+// up to the client to choose which builder to use.
 func BuilderVersion(features map[string]bool) types.BuilderVersion {
-	var bv types.BuilderVersion
-	if v, ok := features["buildkit"]; ok {
-		if v {
-			bv = types.BuilderBuildKit
-		} else {
-			bv = types.BuilderV1
-		}
+	// TODO(thaJeztah) move the default to daemon/config
+	if runtime.GOOS == "windows" {
+		return types.BuilderV1
+	}
+
+	bv := types.BuilderBuildKit
+	if v, ok := features["buildkit"]; ok && !v {
+		bv = types.BuilderV1
 	}
 	return bv
 }
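
Note: read as a truth table, the rewritten BuilderVersion defaults to BuildKit everywhere except Windows, and the "buildkit" feature flag can only downgrade the recommendation. A minimal sketch of the resulting behavior on a Linux host, assuming the package is importable as shown (this is illustration, not part of the change):

    package main

    import (
    	"fmt"

    	buildrouter "github.com/docker/docker/api/server/router/build"
    )

    func main() {
    	// No feature flag and an explicit opt-in both yield "2" (BuildKit);
    	// only an explicit opt-out yields "1" (the classic builder).
    	fmt.Println(buildrouter.BuilderVersion(nil))                                // "2"
    	fmt.Println(buildrouter.BuilderVersion(map[string]bool{"buildkit": true}))  // "2"
    	fmt.Println(buildrouter.BuilderVersion(map[string]bool{"buildkit": false})) // "1"
    }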
@@ -238,7 +238,6 @@ func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *
 	defer func() { _ = output.Close() }()
 
 	errf := func(err error) error {
-
 		if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 {
 			_, _ = output.Write(notVerboseBuffer.Bytes())
 		}

@@ -115,5 +115,4 @@ func TestAdjustForAPIVersion(t *testing.T) {
 	if len(spec.TaskTemplate.ContainerSpec.Ulimits) != 0 {
 		t.Error("Ulimits were not stripped from spec")
 	}
-
 }

@@ -174,6 +174,15 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
 	if versions.LessThan(version, "1.42") {
 		for _, b := range buildCache {
 			builderSize += b.Size
+			// Parents field was added in API 1.42 to replace the Parent field.
+			b.Parents = nil
+		}
+	}
+	if versions.GreaterThanOrEqualTo(version, "1.42") {
+		for _, b := range buildCache {
+			// Parent field is deprecated in API v1.42 and up, as it is deprecated
+			// in BuildKit. Empty the field to omit it in the API response.
+			b.Parent = "" //nolint:staticcheck // ignore SA1019 (Parent field is deprecated)
 		}
 	}
 
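
Note: the gating above relies on the versions helpers comparing API version strings numerically. A minimal sketch of the same branch structure, assuming only the real api/types/versions package (the version strings are example inputs):

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/api/types/versions"
    )

    func main() {
    	for _, clientVersion := range []string{"1.41", "1.42"} {
    		if versions.LessThan(clientVersion, "1.42") {
    			// Legacy clients: keep Parent, clear the new Parents field.
    			fmt.Println(clientVersion, "-> Parent kept, Parents cleared")
    		} else {
    			// API 1.42 and up: clear the deprecated Parent field instead.
    			fmt.Println(clientVersion, "-> Parents kept, Parent cleared")
    		}
    	}
    }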
@@ -162,11 +162,16 @@ func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter,
 	version := httputils.VersionFromContext(ctx)
 
 	err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force))
-	if err != nil {
-		if errdefs.IsNotFound(err) && versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
-			err := v.cluster.RemoveVolume(vars["name"], force)
-			if err != nil {
-				return err
+	// when a removal is forced, if the volume does not exist, no error will be
+	// returned. this means that to ensure forcing works on swarm volumes as
+	// well, we should always also force remove against the cluster.
+	if err != nil || force {
+		if versions.GreaterThanOrEqualTo(version, clusterVolumesVersion) && v.cluster.IsManager() {
+			if errdefs.IsNotFound(err) || force {
+				err := v.cluster.RemoveVolume(vars["name"], force)
+				if err != nil {
+					return err
+				}
 			}
 		} else {
 			return err

@@ -187,6 +192,12 @@ func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter,
 		return err
 	}
 
+	// API version 1.42 changes behavior where prune should only prune anonymous volumes.
+	// To keep older API behavior working, we need to add this filter option to consider all (local) volumes for pruning, not just anonymous ones.
+	if versions.LessThan(httputils.VersionFromContext(ctx), "1.42") {
+		pruneFilters.Add("all", "true")
+	}
+
 	pruneReport, err := v.backend.Prune(ctx, pruneFilters)
 	if err != nil {
 		return err
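
Note: a sketch of what this compatibility shim does in isolation, using the real filters and versions packages; the version string is an example value, not taken from the change:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/api/types/filters"
    	"github.com/docker/docker/api/types/versions"
    )

    func main() {
    	pruneFilters := filters.NewArgs() // what a pre-1.42 client would send
    	clientVersion := "1.41"           // example value

    	if versions.LessThan(clientVersion, "1.42") {
    		// Restore pre-1.42 behavior: prune all local volumes,
    		// not only anonymous ones.
    		pruneFilters.Add("all", "true")
    	}
    	fmt.Println(pruneFilters.Get("all")) // [true]
    }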
@@ -79,7 +79,7 @@ func TestGetVolumeByNameFoundRegular(t *testing.T) {
 		backend: &fakeVolumeBackend{
 			volumes: map[string]*volume.Volume{
-				"volume1": &volume.Volume{
+				"volume1": {
 					Name: "volume1",
 				},
 			},
@@ -98,7 +98,7 @@ func TestGetVolumeByNameFoundSwarm(t *testing.T) {
 			swarm:   true,
 			manager: true,
 			volumes: map[string]*volume.Volume{
-				"volume1": &volume.Volume{
+				"volume1": {
 					Name: "volume1",
 				},
 			},
@@ -112,16 +112,16 @@ func TestListVolumes(t *testing.T) {
 	v := &volumeRouter{
 		backend: &fakeVolumeBackend{
 			volumes: map[string]*volume.Volume{
-				"v1": &volume.Volume{Name: "v1"},
-				"v2": &volume.Volume{Name: "v2"},
+				"v1": {Name: "v1"},
+				"v2": {Name: "v2"},
 			},
 		},
 		cluster: &fakeClusterBackend{
 			swarm:   true,
 			manager: true,
 			volumes: map[string]*volume.Volume{
-				"v3": &volume.Volume{Name: "v3"},
-				"v4": &volume.Volume{Name: "v4"},
+				"v3": {Name: "v3"},
+				"v4": {Name: "v4"},
 			},
 		},
 	}
@@ -140,8 +140,8 @@ func TestListVolumesNoSwarm(t *testing.T) {
 	v := &volumeRouter{
 		backend: &fakeVolumeBackend{
 			volumes: map[string]*volume.Volume{
-				"v1": &volume.Volume{Name: "v1"},
-				"v2": &volume.Volume{Name: "v2"},
+				"v1": {Name: "v1"},
+				"v2": {Name: "v2"},
 			},
 		},
 		cluster: &fakeClusterBackend{},
@@ -155,8 +155,8 @@ func TestListVolumesNoManager(t *testing.T) {
 	v := &volumeRouter{
 		backend: &fakeVolumeBackend{
 			volumes: map[string]*volume.Volume{
-				"v1": &volume.Volume{Name: "v1"},
-				"v2": &volume.Volume{Name: "v2"},
+				"v1": {Name: "v1"},
+				"v2": {Name: "v2"},
 			},
 		},
 		cluster: &fakeClusterBackend{swarm: true},
@@ -318,7 +318,7 @@ func TestUpdateVolume(t *testing.T) {
 			swarm:   true,
 			manager: true,
 			volumes: map[string]*volume.Volume{
-				"vol1": &volume.Volume{
+				"vol1": {
 					Name: "vo1",
 					ClusterVolume: &volume.ClusterVolume{
 						ID: "vol1",
@@ -409,7 +409,7 @@ func TestUpdateVolumeNotFound(t *testing.T) {
 func TestVolumeRemove(t *testing.T) {
 	b := &fakeVolumeBackend{
 		volumes: map[string]*volume.Volume{
-			"vol1": &volume.Volume{
+			"vol1": {
 				Name: "vol1",
 			},
 		},
@@ -436,7 +436,7 @@ func TestVolumeRemoveSwarm(t *testing.T) {
 			swarm:   true,
 			manager: true,
 			volumes: map[string]*volume.Volume{
-				"vol1": &volume.Volume{
+				"vol1": {
 					Name:          "vol1",
 					ClusterVolume: &volume.ClusterVolume{},
 				},
@@ -494,7 +494,7 @@ func TestVolumeRemoveNotFoundNoManager(t *testing.T) {
 func TestVolumeRemoveFoundNoSwarm(t *testing.T) {
 	b := &fakeVolumeBackend{
 		volumes: map[string]*volume.Volume{
-			"vol1": &volume.Volume{
+			"vol1": {
 				Name: "vol1",
 			},
 		},
@@ -518,7 +518,7 @@ func TestVolumeRemoveFoundNoSwarm(t *testing.T) {
 func TestVolumeRemoveNoSwarmInUse(t *testing.T) {
 	b := &fakeVolumeBackend{
 		volumes: map[string]*volume.Volume{
-			"inuse": &volume.Volume{
+			"inuse": {
 				Name: "inuse",
 			},
 		},
@@ -544,7 +544,7 @@ func TestVolumeRemoveSwarmForce(t *testing.T) {
 			swarm:   true,
 			manager: true,
 			volumes: map[string]*volume.Volume{
-				"vol1": &volume.Volume{
+				"vol1": {
 					Name:          "vol1",
 					ClusterVolume: &volume.ClusterVolume{},
 					Options:       map[string]string{"mustforce": "yes"},
@@ -574,6 +574,7 @@ func TestVolumeRemoveSwarmForce(t *testing.T) {
 
 	assert.NilError(t, err)
 	assert.Equal(t, len(b.volumes), 0)
+	assert.Equal(t, len(c.volumes), 0)
 }
 
 type fakeVolumeBackend struct {
@@ -616,9 +617,16 @@ func (b *fakeVolumeBackend) Create(_ context.Context, name, driverName string, _
 	return v, nil
 }
 
-func (b *fakeVolumeBackend) Remove(_ context.Context, name string, _ ...opts.RemoveOption) error {
+func (b *fakeVolumeBackend) Remove(_ context.Context, name string, o ...opts.RemoveOption) error {
+	removeOpts := &opts.RemoveConfig{}
+	for _, opt := range o {
+		opt(removeOpts)
+	}
+
 	if v, ok := b.volumes[name]; !ok {
-		return errdefs.NotFound(fmt.Errorf("volume %s not found", name))
+		if !removeOpts.PurgeOnError {
+			return errdefs.NotFound(fmt.Errorf("volume %s not found", name))
+		}
 	} else if v.Name == "inuse" {
 		return errdefs.Conflict(fmt.Errorf("volume in use"))
 	}

@@ -6,6 +6,7 @@ import (
 	"net"
 	"net/http"
 	"strings"
+	"time"
 
 	"github.com/docker/docker/api/server/httpstatus"
 	"github.com/docker/docker/api/server/httputils"
@@ -58,7 +59,8 @@ func (s *Server) Accept(addr string, listeners ...net.Listener) {
 	for _, listener := range listeners {
 		httpServer := &HTTPServer{
 			srv: &http.Server{
 				Addr: addr,
+				ReadHeaderTimeout: 5 * time.Minute, // "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout.
 			},
 			l: listener,
 		}
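
Note: ReadHeaderTimeout bounds how long a client may take to finish sending request headers, which is the Slowloris vector gosec's G112 flags. A standalone sketch using only the standard library (the address and timeout are example values):

    package main

    import (
    	"log"
    	"net/http"
    	"time"
    )

    func main() {
    	srv := &http.Server{
    		Addr: ":8080",
    		// Without a header timeout, half-open connections that trickle
    		// header bytes can be held open indefinitely (Slowloris).
    		ReadHeaderTimeout: 5 * time.Minute,
    	}
    	log.Fatal(srv.ListenAndServe())
    }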
@@ -24,7 +24,7 @@ info:
   title: "Docker Engine API"
   version: "1.42"
   x-logo:
-    url: "https://docs.docker.com/images/logo-docker-main.png"
+    url: "https://docs.docker.com/assets/images/logo-docker-main.png"
   description: |
     The Engine API is an HTTP API served by Docker Engine. It is the API the
     Docker client uses to communicate with the Engine, so everything the Docker
@@ -214,12 +214,14 @@ definitions:
           - `volume` a docker volume with the given `Name`.
           - `tmpfs` a `tmpfs`.
           - `npipe` a named pipe from the host into the container.
+          - `cluster` a Swarm cluster volume
         type: "string"
         enum:
           - "bind"
           - "volume"
          - "tmpfs"
          - "npipe"
+          - "cluster"
        example: "volume"
      Name:
        description: |
@@ -350,12 +352,14 @@ definitions:
          - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.
          - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.
          - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.
+          - `cluster` a Swarm cluster volume
        type: "string"
        enum:
          - "bind"
          - "volume"
          - "tmpfs"
          - "npipe"
+          - "cluster"
      ReadOnly:
        description: "Whether the mount should be read-only."
        type: "boolean"
@@ -2247,23 +2251,63 @@ definitions:
 
   BuildCache:
     type: "object"
+    description: |
+      BuildCache contains information about a build cache record.
     properties:
       ID:
         type: "string"
+        description: |
+          Unique ID of the build cache record.
+        example: "ndlpt0hhvkqcdfkputsk4cq9c"
       Parent:
+        description: |
+          ID of the parent build cache record.
+
+          > **Deprecated**: This field is deprecated, and omitted if empty.
         type: "string"
+        x-nullable: true
+        example: ""
+      Parents:
+        description: |
+          List of parent build cache record IDs.
+        type: "array"
+        items:
+          type: "string"
+        x-nullable: true
+        example: ["hw53o5aio51xtltp5xjp8v7fx"]
       Type:
         type: "string"
+        description: |
+          Cache record type.
+        example: "regular"
+        # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84
+        enum:
+          - "internal"
+          - "frontend"
+          - "source.local"
+          - "source.git.checkout"
+          - "exec.cachemount"
+          - "regular"
       Description:
         type: "string"
+        description: |
+          Description of the build-step that produced the build cache.
+        example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
       InUse:
         type: "boolean"
+        description: |
+          Indicates if the build cache is in use.
+        example: false
       Shared:
         type: "boolean"
+        description: |
+          Indicates if the build cache is shared.
+        example: true
       Size:
         description: |
           Amount of disk space used by the build cache (in bytes).
         type: "integer"
+        example: 51
       CreatedAt:
         description: |
           Date and time at which the build cache was created in
@@ -2281,6 +2325,7 @@ definitions:
         example: "2017-08-09T07:09:37.632105588Z"
       UsageCount:
         type: "integer"
+        example: 26
 
   ImageID:
     type: "object"
@@ -6210,6 +6255,28 @@ paths:
             `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`.
           type: "string"
          pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
+        - name: "platform"
+          in: "query"
+          description: |
+            Platform in the format `os[/arch[/variant]]` used for image lookup.
+
+            When specified, the daemon checks if the requested image is present
+            in the local image cache with the given OS and Architecture, and
+            otherwise returns a `404` status.
+
+            If the option is not set, the host's native OS and Architecture are
+            used to look up the image in the image cache. However, if no platform
+            is passed and the given image does exist in the local image cache,
+            but its OS or architecture does not match, the container is created
+            with the available image, and a warning is added to the `Warnings`
+            field in the response, for example;
+
+                WARNING: The requested image's platform (linux/arm64/v8) does not
+                         match the detected host platform (linux/amd64) and no
+                         specific platform was requested
+
+          type: "string"
+          default: ""
        - name: "body"
          in: "body"
          description: "Container to create"
@@ -8719,7 +8786,17 @@ paths:
              description: "Max API Version the server supports"
            Builder-Version:
              type: "string"
-              description: "Default version of docker image builder"
+              description: |
+                Default version of docker image builder
+
+                The default on Linux is version "2" (BuildKit), but the daemon
+                can be configured to recommend version "1" (classic Builder).
+                Windows does not yet support BuildKit for native Windows images,
+                and uses "1" (classic builder) as a default.
+
+                This value is a recommendation as advertised by the daemon, and
+                it is up to the client to choose which builder to use.
+              default: "2"
            Docker-Experimental:
              type: "boolean"
              description: "If the server is running with experimental mode enabled"
@@ -9013,7 +9090,7 @@ paths:
              BuildCache:
                -
                  ID: "hw53o5aio51xtltp5xjp8v7fx"
-                  Parent: ""
+                  Parents: []
                  Type: "regular"
                  Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0"
                  InUse: false
@@ -9024,7 +9101,7 @@ paths:
                  UsageCount: 26
                -
                  ID: "ndlpt0hhvkqcdfkputsk4cq9c"
-                  Parent: "hw53o5aio51xtltp5xjp8v7fx"
+                  Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"]
                  Type: "regular"
                  Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
                  InUse: false
@@ -9622,6 +9699,7 @@ paths:
 
            Available filters:
            - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
+            - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes.
          type: "string"
      responses:
        200:

@@ -1,4 +1,5 @@
-/*Package filters provides tools for encoding a mapping of keys to a set of
+/*
+Package filters provides tools for encoding a mapping of keys to a set of
 multiple values.
 */
 package filters // import "github.com/docker/docker/api/types/filters"
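
Note: a short usage sketch of the encoding the package comment describes, with arbitrary example keys; filters.ToJSON is the real helper that produces the wire form used in API query parameters:

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/api/types/filters"
    )

    func main() {
    	// One key ("label") mapped to a set of multiple values.
    	args := filters.NewArgs()
    	args.Add("label", "env=prod")
    	args.Add("label", "tier=frontend")
    	args.Add("dangling", "true")

    	enc, err := filters.ToJSON(args)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(enc) // JSON mapping each key to its value set
    }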
@@ -18,7 +18,7 @@ const (
 	// TypeNamedPipe is the type for mounting Windows named pipes
 	TypeNamedPipe Type = "npipe"
 	// TypeCluster is the type for Swarm Cluster Volumes.
-	TypeCluster = "csi"
+	TypeCluster Type = "cluster"
 )
 
 // Mount represents a mount (volume).
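
Note: with the constant now a properly typed Type with value "cluster", a cluster volume mount is declared like any other mount type. A hedged sketch (the volume name and target path are invented):

    package main

    import (
    	"fmt"

    	"github.com/docker/docker/api/types/mount"
    )

    func main() {
    	m := mount.Mount{
    		Type:   mount.TypeCluster, // "cluster", no longer the mismatched "csi"
    		Source: "my-cluster-volume",
    		Target: "/srv/data",
    	}
    	fmt.Printf("%+v\n", m)
    }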
@@ -45,31 +45,32 @@ func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
 // IndexInfo contains information about a registry
 //
 // RepositoryInfo Examples:
-// {
-//   "Index" : {
-//     "Name" : "docker.io",
-//     "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
-//     "Secure" : true,
-//     "Official" : true,
-//   },
-//   "RemoteName" : "library/debian",
-//   "LocalName" : "debian",
-//   "CanonicalName" : "docker.io/debian"
-//   "Official" : true,
-// }
-//
-// {
-//   "Index" : {
-//     "Name" : "127.0.0.1:5000",
-//     "Mirrors" : [],
-//     "Secure" : false,
-//     "Official" : false,
-//   },
-//   "RemoteName" : "user/repo",
-//   "LocalName" : "127.0.0.1:5000/user/repo",
-//   "CanonicalName" : "127.0.0.1:5000/user/repo",
-//   "Official" : false,
-// }
+//
+//	{
+//	  "Index" : {
+//	    "Name" : "docker.io",
+//	    "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
+//	    "Secure" : true,
+//	    "Official" : true,
+//	  },
+//	  "RemoteName" : "library/debian",
+//	  "LocalName" : "debian",
+//	  "CanonicalName" : "docker.io/debian"
+//	  "Official" : true,
+//	}
+//
+//	{
+//	  "Index" : {
+//	    "Name" : "127.0.0.1:5000",
+//	    "Mirrors" : [],
+//	    "Secure" : false,
+//	    "Official" : false,
+//	  },
+//	  "RemoteName" : "user/repo",
+//	  "LocalName" : "127.0.0.1:5000/user/repo",
+//	  "CanonicalName" : "127.0.0.1:5000/user/repo",
+//	  "Official" : false,
+//	}
 type IndexInfo struct {
 	// Name is the name of the registry, such as "docker.io"
 	Name string

@@ -33,17 +33,16 @@ func TestStrSliceUnmarshalJSON(t *testing.T) {
 		"[]": {},
 		`["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"},
 	}
-	for json, expectedParts := range parts {
+	for input, expected := range parts {
 		strs := StrSlice{"default", "values"}
-		if err := strs.UnmarshalJSON([]byte(json)); err != nil {
+		if err := strs.UnmarshalJSON([]byte(input)); err != nil {
 			t.Fatal(err)
 		}
 
 		actualParts := []string(strs)
-		if !reflect.DeepEqual(actualParts, expectedParts) {
-			t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts)
+		if !reflect.DeepEqual(actualParts, expected) {
+			t.Fatalf("%#v: expected %v, got %v", input, expected, actualParts)
 		}
-
 	}
 }

@@ -100,8 +100,10 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
 // if the incoming nanosecond portion is longer or shorter than 9 digits it is
 // converted to nanoseconds. The expectation is that the seconds and
 // seconds will be used to create a time variable. For example:
-// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0)
-// if err == nil since := time.Unix(seconds, nanoseconds)
+//
+//	seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0)
+//	if err == nil since := time.Unix(seconds, nanoseconds)
+//
 // returns seconds as def(aultSeconds) if value == ""
 func ParseTimestamps(value string, def int64) (int64, int64, error) {
 	if value == "" {
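
Note: the reformatted comment is now a runnable fragment. Spelled out against the real package (the exported function is ParseTimestamps; the ParseTimestamp spelling is carried over from the original comment):

    package main

    import (
    	"fmt"
    	"time"

    	apitime "github.com/docker/docker/api/types/time"
    )

    func main() {
    	seconds, nanoseconds, err := apitime.ParseTimestamps("1136073600.000000001", 0)
    	if err != nil {
    		panic(err)
    	}
    	since := time.Unix(seconds, nanoseconds)
    	fmt.Println(since.UTC()) // 2006-01-01 00:00:00.000000001 +0000 UTC
    }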
@@ -774,18 +774,31 @@ type BuildResult struct {
 	ID string
 }
 
-// BuildCache contains information about a build cache record
+// BuildCache contains information about a build cache record.
 type BuildCache struct {
-	ID          string
-	Parent      string
-	Type        string
+	// ID is the unique ID of the build cache record.
+	ID string
+	// Parent is the ID of the parent build cache record.
+	//
+	// Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
+	Parent string `json:"Parent,omitempty"`
+	// Parents is the list of parent build cache record IDs.
+	Parents []string `json:"Parents,omitempty"`
+	// Type is the cache record type.
+	Type string
+	// Description is a description of the build-step that produced the build cache.
 	Description string
-	InUse       bool
-	Shared      bool
-	Size        int64
-	CreatedAt   time.Time
-	LastUsedAt  *time.Time
-	UsageCount  int
+	// InUse indicates if the build cache is in use.
+	InUse bool
+	// Shared indicates if the build cache is shared.
+	Shared bool
+	// Size is the amount of disk space used by the build cache (in bytes).
+	Size int64
+	// CreatedAt is the date and time at which the build cache was created.
+	CreatedAt time.Time
+	// LastUsedAt is the date and time at which the build cache was last used.
+	LastUsedAt *time.Time
+	UsageCount int
 }
 
 // BuildCachePruneOptions hold parameters to prune the build cache
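
Note: the new omitempty tags change the wire format only when the fields are empty. A sketch of the effect with invented values; whether your vendored copy of the types package has these fields depends on picking up this change:

    package main

    import (
    	"encoding/json"
    	"os"

    	"github.com/docker/docker/api/types"
    )

    func main() {
    	record := types.BuildCache{
    		ID:      "ndlpt0hhvkqcdfkputsk4cq9c", // example values
    		Parents: []string{"hw53o5aio51xtltp5xjp8v7fx"},
    		Type:    "regular",
    	}
    	// Parent is empty, so `json:"Parent,omitempty"` drops it from the
    	// output; Parents is set, so it is emitted.
    	enc := json.NewEncoder(os.Stdout)
    	enc.SetIndent("", "  ")
    	_ = enc.Encode(record)
    }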
@@ -104,7 +104,7 @@ type AccessMode struct {
 	BlockVolume *TypeBlock `json:",omitempty"`
 }
 
-// Scope defines the Scope of a CSI Volume. This is how many nodes a
+// Scope defines the Scope of a Cluster Volume. This is how many nodes a
 // Volume can be accessed simultaneously on.
 type Scope string
 
@@ -118,7 +118,7 @@ const (
 	ScopeMultiNode Scope = "multi"
 )
 
-// SharingMode defines the Sharing of a CSI Volume. This is how Tasks using a
+// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a
 // Volume at the same time can use it.
 type SharingMode string
 

@@ -177,7 +177,7 @@ func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session
 	p := &puller{
 		src: imageIdentifier,
 		is:  is,
-		//resolver: is.getResolver(is.RegistryHosts, imageIdentifier.Reference.String(), sm, g),
+		// resolver: is.getResolver(is.RegistryHosts, imageIdentifier.Reference.String(), sm, g),
 		platform: platform,
 		sm:       sm,
 	}
@@ -439,7 +439,6 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
 		// TODO: Optimize to do dispatch and integrate pulling with download manager,
 		// leverage existing blob mapping and layer storage
 	} else {
-
 		// TODO: need a wrapper snapshot interface that combines content
 		// and snapshots as 1) buildkit shouldn't have a dependency on contentstore
 		// or 2) cachemanager should manage the contentstore

@@ -24,7 +24,6 @@ import (
 
 // ResolveCacheImporterFunc returns a resolver function for local inline cache
 func ResolveCacheImporterFunc(sm *session.Manager, resolverFunc docker.RegistryHosts, cs content.Store, rs reference.Store, is imagestore.Store) remotecache.ResolveCacheImporterFunc {
-
 	upstream := registryremotecache.ResolveCacheImporterFunc(sm, cs, resolverFunc)
 
 	return func(ctx context.Context, group session.Group, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) {

@@ -129,7 +129,8 @@ func (b *Builder) DiskUsage(ctx context.Context) ([]*types.BuildCache, error) {
 	for _, r := range duResp.Record {
 		items = append(items, &types.BuildCache{
 			ID:          r.ID,
-			Parent:      r.Parent,
+			Parent:      r.Parent, //nolint:staticcheck // ignore SA1019 (Parent field is deprecated)
+			Parents:     r.Parents,
 			Type:        r.RecordType,
 			Description: r.Description,
 			InUse:       r.InUse,

@@ -22,11 +22,10 @@ import (

 func emptyImageConfig() ([]byte, error) {
     pl := platforms.Normalize(platforms.DefaultSpec())
-    img := ocispec.Image{
-        Architecture: pl.Architecture,
-        OS:           pl.OS,
-        Variant:      pl.Variant,
-    }
+    img := ocispec.Image{}
+    img.Architecture = pl.Architecture
+    img.OS = pl.OS
+    img.Variant = pl.Variant
     img.RootFS.Type = "layers"
     img.Config.WorkingDir = "/"
     img.Config.Env = []string{"PATH=" + system.DefaultPathEnv(pl.OS)}
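Replacing the composite literal with per-field assignments is behavior-preserving; one plausible motivation is that assignments keep compiling through field promotion if Architecture/OS/Variant later move into a struct embedded in ocispec.Image, whereas a literal naming promoted fields would not. The equivalent construction in isolation (a sketch; ocispec is assumed to be github.com/opencontainers/image-spec/specs-go/v1):

    pl := platforms.Normalize(platforms.DefaultSpec())

    img := ocispec.Image{} // zero value, then assign
    img.Architecture = pl.Architecture
    img.OS = pl.OS
    img.Variant = pl.Variant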
@@ -295,7 +295,6 @@ func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.
         }
         dispatchRequest.state.updateRunConfig()
         fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID))
-
     }
     if err := emitImageID(b.Aux, dispatchRequest.state); err != nil {
         return nil, err
@@ -105,7 +105,6 @@ func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, i
         imageSource: imageSource,
         platform:    platform,
     }
-
 }

 func (o *copier) createCopyInstruction(sourcesAndDest instructions.SourcesAndDest, cmdName string) (copyInstruction, error) {
@@ -35,7 +35,6 @@ import (
 //
 // Sets the environment variable foo to bar, also makes interpolation
 // in the dockerfile available from the next statement on via ${foo}.
-//
 func dispatchEnv(d dispatchRequest, c *instructions.EnvCommand) error {
     runConfig := d.state.runConfig
     commitMessage := bytes.NewBufferString("ENV")
@@ -65,7 +64,6 @@ func dispatchEnv(d dispatchRequest, c *instructions.EnvCommand) error {
 //
 // Sets the maintainer metadata.
 func dispatchMaintainer(d dispatchRequest, c *instructions.MaintainerCommand) error {
-
     d.state.maintainer = c.Maintainer
     return d.builder.commit(d.state, "MAINTAINER "+c.Maintainer)
 }
@@ -73,7 +71,6 @@ func dispatchMaintainer(d dispatchRequest, c *instructions.MaintainerCommand) er
 // LABEL some json data describing the image
 //
 // Sets the Label variable foo to bar,
-//
 func dispatchLabel(d dispatchRequest, c *instructions.LabelCommand) error {
     if d.state.runConfig.Labels == nil {
         d.state.runConfig.Labels = make(map[string]string)
@@ -90,7 +87,6 @@ func dispatchLabel(d dispatchRequest, c *instructions.LabelCommand) error {
 //
 // Add the file 'foo' to '/path'. Tarball and Remote URL (http, https) handling
 // exist here. If you do not wish to have this automatic handling, use COPY.
-//
 func dispatchAdd(d dispatchRequest, c *instructions.AddCommand) error {
     if c.Chmod != "" {
         return errors.New("the --chmod option requires BuildKit. Refer to https://docs.docker.com/go/buildkit/ to learn how to build images with BuildKit enabled")
@@ -112,7 +108,6 @@ func dispatchAdd(d dispatchRequest, c *instructions.AddCommand) error {
 // COPY foo /path
 //
 // Same as 'ADD' but without the tar and remote url handling.
-//
 func dispatchCopy(d dispatchRequest, c *instructions.CopyCommand) error {
     if c.Chmod != "" {
         return errors.New("the --chmod option requires BuildKit. Refer to https://docs.docker.com/go/buildkit/ to learn how to build images with BuildKit enabled")
@@ -157,7 +152,6 @@ func (d *dispatchRequest) getImageMount(imageRefOrID string) (*imageMount, error
 }

 // FROM [--platform=platform] imagename[:tag | @digest] [AS build-stage-name]
-//
 func initializeStage(d dispatchRequest, cmd *instructions.Stage) error {
     d.builder.imageProber.Reset()

@@ -290,7 +284,6 @@ func dispatchOnbuild(d dispatchRequest, c *instructions.OnbuildCommand) error {
 // WORKDIR /tmp
 //
 // Set the working directory for future RUN/CMD/etc statements.
-//
 func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error {
     runConfig := d.state.runConfig
     var err error
@@ -333,7 +326,6 @@ func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error {
 // RUN echo hi # sh -c echo hi (Linux and LCOW)
 // RUN echo hi # cmd /S /C echo hi (Windows)
 // RUN [ "echo", "hi" ] # echo hi
-//
 func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error {
     if !system.IsOSSupported(d.state.operatingSystem) {
         return system.ErrNotSupportedOperatingSystem
@@ -428,7 +420,6 @@ func prependEnvOnCmd(buildArgs *BuildArgs, buildArgVars []string, cmd strslice.S
 //
 // Set the default command to run in the container (which may be empty).
 // Argument handling is the same as RUN.
-//
 func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error {
     runConfig := d.state.runConfig
     cmd, argsEscaped := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem, c.Name(), c.String())
@@ -459,7 +450,6 @@ func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error {
 //
 // Set the default healthcheck command to run in the container (which may be empty).
 // Argument handling is the same as RUN.
-//
 func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) error {
     runConfig := d.state.runConfig
     if runConfig.Healthcheck != nil {
@@ -479,7 +469,6 @@ func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand)
 //
 // Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint
 // is initialized at newBuilder time instead of through argument parsing.
-//
 func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) error {
     runConfig := d.state.runConfig
     cmd, argsEscaped := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem, c.Name(), c.String())
@@ -509,7 +498,6 @@ func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) er
 //
 // Expose ports for links and port mappings. This all ends up in
 // req.runConfig.ExposedPorts for runconfig.
-//
 func dispatchExpose(d dispatchRequest, c *instructions.ExposeCommand, envs []string) error {
     // custom multi word expansion
     // expose $FOO with FOO="80 443" is expanded as EXPOSE [80,443]. This is the only command supporting word to words expansion
@@ -543,7 +531,6 @@ func dispatchExpose(d dispatchRequest, c *instructions.ExposeCommand, envs []str
 //
 // Set the user to 'foo' for future commands and when running the
 // ENTRYPOINT/CMD at container run time.
-//
 func dispatchUser(d dispatchRequest, c *instructions.UserCommand) error {
     d.state.runConfig.User = c.User
     return d.builder.commit(d.state, fmt.Sprintf("USER %v", c.User))
@@ -552,7 +539,6 @@ func dispatchUser(d dispatchRequest, c *instructions.UserCommand) error {
 // VOLUME /foo
 //
 // Expose the volume /foo for use. Will also accept the JSON array form.
-//
 func dispatchVolume(d dispatchRequest, c *instructions.VolumeCommand) error {
     if d.state.runConfig.Volumes == nil {
         d.state.runConfig.Volumes = map[string]struct{}{}
@@ -570,7 +556,6 @@ func dispatchVolume(d dispatchRequest, c *instructions.VolumeCommand) error {
 //
 // Set the signal that will be used to kill the container.
 func dispatchStopSignal(d dispatchRequest, c *instructions.StopSignalCommand) error {
-
     _, err := signal.ParseSignal(c.Signal)
     if err != nil {
         return errdefs.InvalidParameter(err)
@@ -284,7 +284,6 @@ func TestHealthcheckNone(t *testing.T) {
 }

 func TestHealthcheckCmd(t *testing.T) {
-
     b := newBuilderWithMockBackend()
     sb := newDispatchRequest(b, '`', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults())
     expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"}
@@ -140,7 +140,6 @@ func TestCopyRunConfig(t *testing.T) {
         // Assert the original was not modified
         assert.Check(t, runConfig != runConfigCopy, testcase.doc)
     }
-
 }

 func fullMutableRunConfig() *container.Config {
@@ -13,9 +13,9 @@ import (
     "github.com/docker/docker/builder"
     "github.com/docker/docker/builder/remotecontext/urlutil"
     "github.com/docker/docker/errdefs"
-    "github.com/docker/docker/pkg/fileutils"
     "github.com/moby/buildkit/frontend/dockerfile/dockerignore"
     "github.com/moby/buildkit/frontend/dockerfile/parser"
+    "github.com/moby/patternmatcher"
     "github.com/pkg/errors"
     "github.com/sirupsen/logrus"
 )
@@ -130,7 +130,7 @@ func removeDockerfile(c modifiableContext, filesToRemove ...string) error {
     f.Close()
     filesToRemove = append([]string{".dockerignore"}, filesToRemove...)
     for _, fileToRemove := range filesToRemove {
-        if rm, _ := fileutils.MatchesOrParentMatches(fileToRemove, excludes); rm {
+        if rm, _ := patternmatcher.MatchesOrParentMatches(fileToRemove, excludes); rm {
             if err := c.Remove(fileToRemove); err != nil {
                 logrus.Errorf("failed to remove %s: %v", fileToRemove, err)
             }
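fileutils.MatchesOrParentMatches is replaced by the same helper from the extracted github.com/moby/patternmatcher module; the call shape is unchanged. A small usage sketch with an illustrative pattern list:

    package main

    import (
        "fmt"

        "github.com/moby/patternmatcher"
    )

    func main() {
        // .dockerignore-style patterns: exclude markdown, re-include README.md.
        excludes := []string{"*.md", "!README.md", "vendor"}

        for _, f := range []string{"NOTES.md", "README.md", "vendor/a/b.go"} {
            rm, err := patternmatcher.MatchesOrParentMatches(f, excludes)
            if err != nil {
                panic(err)
            }
            fmt.Printf("%s -> remove=%v\n", f, rm)
        }
        // Output:
        // NOTES.md -> remove=true
        // README.md -> remove=false
        // vendor/a/b.go -> remove=true
    }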
@@ -72,7 +72,6 @@ func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) {
     executeProcess(t, contextDir)

     checkDirectory(t, contextDir, []string{shouldStayFilename})
-
 }

 func TestProcessNoDockerignore(t *testing.T) {
@@ -85,7 +84,6 @@ func TestProcessNoDockerignore(t *testing.T) {
     executeProcess(t, contextDir)

     checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName})
-
 }

 func TestProcessShouldLeaveAllFiles(t *testing.T) {
@@ -99,7 +97,6 @@ func TestProcessShouldLeaveAllFiles(t *testing.T) {
     executeProcess(t, contextDir)

     checkDirectory(t, contextDir, []string{shouldStayFilename, builder.DefaultDockerfileName, dockerignoreFilename})
-
 }

 // TODO: remove after moving to a separate pkg
@@ -11,7 +11,7 @@ import (

 // MakeGitContext returns a Context from gitURL that is cloned in a temporary directory.
 func MakeGitContext(gitURL string) (builder.Source, error) {
-    root, err := git.Clone(gitURL)
+    root, err := git.Clone(gitURL, git.WithIsolatedConfig(true))
     if err != nil {
         return nil, err
     }
@@ -16,21 +16,38 @@ type gitRepo struct {
     remote string
     ref    string
     subdir string
+
+    isolateConfig bool
+}
+
+// CloneOption changes the behaviour of Clone().
+type CloneOption func(*gitRepo)
+
+// WithIsolatedConfig disables reading the user or system gitconfig files when
+// performing Git operations.
+func WithIsolatedConfig(v bool) CloneOption {
+    return func(gr *gitRepo) {
+        gr.isolateConfig = v
+    }
 }

 // Clone clones a repository into a newly created directory which
 // will be under "docker-build-git"
-func Clone(remoteURL string) (string, error) {
+func Clone(remoteURL string, opts ...CloneOption) (string, error) {
     repo, err := parseRemoteURL(remoteURL)

     if err != nil {
         return "", err
     }

-    return cloneGitRepo(repo)
+    for _, opt := range opts {
+        opt(&repo)
+    }
+
+    return repo.clone()
 }

-func cloneGitRepo(repo gitRepo) (checkoutDir string, err error) {
+func (repo gitRepo) clone() (checkoutDir string, err error) {
     fetch := fetchArgs(repo.remote, repo.ref)

     root, err := os.MkdirTemp("", "docker-build-git")
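Clone now takes variadic CloneOption values, the standard Go functional-options pattern: each option mutates the parsed gitRepo before the clone runs, so new knobs can be added without breaking existing callers. Caller-side usage, mirroring the MakeGitContext change above (the URL here is illustrative):

    // Clone with the user/system gitconfig ignored:
    root, err := git.Clone("https://example.com/repo.git#main", git.WithIsolatedConfig(true))
    if err != nil {
        return err
    }
    defer os.RemoveAll(root)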
@@ -44,21 +61,21 @@ func cloneGitRepo(repo gitRepo) (checkoutDir string, err error) {
         }
     }()

-    if out, err := gitWithinDir(root, "init"); err != nil {
+    if out, err := repo.gitWithinDir(root, "init"); err != nil {
         return "", errors.Wrapf(err, "failed to init repo at %s: %s", root, out)
     }

     // Add origin remote for compatibility with previous implementation that
     // used "git clone" and also to make sure local refs are created for branches
-    if out, err := gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil {
+    if out, err := repo.gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil {
         return "", errors.Wrapf(err, "failed add origin repo at %s: %s", repo.remote, out)
     }

-    if output, err := gitWithinDir(root, fetch...); err != nil {
+    if output, err := repo.gitWithinDir(root, fetch...); err != nil {
         return "", errors.Wrapf(err, "error fetching: %s", output)
     }

-    checkoutDir, err = checkoutGit(root, repo.ref, repo.subdir)
+    checkoutDir, err = repo.checkout(root)
     if err != nil {
         return "", err
     }
@@ -162,20 +179,20 @@ func supportsShallowClone(remoteURL string) bool {
     return true
 }

-func checkoutGit(root, ref, subdir string) (string, error) {
+func (repo gitRepo) checkout(root string) (string, error) {
     // Try checking out by ref name first. This will work on branches and sets
     // .git/HEAD to the current branch name
-    if output, err := gitWithinDir(root, "checkout", ref); err != nil {
+    if output, err := repo.gitWithinDir(root, "checkout", repo.ref); err != nil {
         // If checking out by branch name fails check out the last fetched ref
-        if _, err2 := gitWithinDir(root, "checkout", "FETCH_HEAD"); err2 != nil {
-            return "", errors.Wrapf(err, "error checking out %s: %s", ref, output)
+        if _, err2 := repo.gitWithinDir(root, "checkout", "FETCH_HEAD"); err2 != nil {
+            return "", errors.Wrapf(err, "error checking out %s: %s", repo.ref, output)
         }
     }

-    if subdir != "" {
-        newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, subdir), root)
+    if repo.subdir != "" {
+        newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, repo.subdir), root)
         if err != nil {
-            return "", errors.Wrapf(err, "error setting git context, %q not within git root", subdir)
+            return "", errors.Wrapf(err, "error setting git context, %q not within git root", repo.subdir)
         }

         fi, err := os.Stat(newCtx)
@@ -191,13 +208,21 @@ func checkoutGit(root, ref, subdir string) (string, error) {
     return root, nil
 }

-func gitWithinDir(dir string, args ...string) ([]byte, error) {
-    a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")}
-    return git(append(a, args...)...)
-}
-
-func git(args ...string) ([]byte, error) {
-    return exec.Command("git", args...).CombinedOutput()
+func (repo gitRepo) gitWithinDir(dir string, args ...string) ([]byte, error) {
+    args = append([]string{"-c", "protocol.file.allow=never"}, args...) // Block sneaky repositories from using repos from the filesystem as submodules.
+    cmd := exec.Command("git", args...)
+    cmd.Dir = dir
+    // Disable unsafe remote protocols.
+    cmd.Env = append(os.Environ(), "GIT_PROTOCOL_FROM_USER=0")
+
+    if repo.isolateConfig {
+        cmd.Env = append(cmd.Env,
+            "GIT_CONFIG_NOSYSTEM=1", // Disable reading from system gitconfig.
+            "HOME=/dev/null",        // Disable reading from user gitconfig.
+        )
+    }
+
+    return cmd.CombinedOutput()
 }

 // isGitTransport returns true if the provided str is a git transport by inspecting
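The rewritten gitWithinDir centralizes the hardening: protocol.file.allow=never blocks file-based submodules, GIT_PROTOCOL_FROM_USER=0 blocks other unsafe remote protocols, and the optional isolation hides both gitconfig files. A standalone sketch of the same environment trick, outside this package:

    cmd := exec.Command("git", "config", "--get", "user.name")
    cmd.Env = append(os.Environ(),
        "GIT_CONFIG_NOSYSTEM=1", // skip /etc/gitconfig
        "HOME=/dev/null",        // skip ~/.gitconfig
    )
    out, _ := cmd.CombinedOutput()
    fmt.Printf("%q\n", out) // prints "" regardless of the host's git configuration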
@@ -1,8 +1,10 @@
 package git // import "github.com/docker/docker/builder/remotecontext/git"

 import (
+    "bytes"
     "fmt"
     "net/http"
+    "net/http/cgi"
     "net/http/httptest"
     "net/url"
     "os"
@@ -160,7 +162,7 @@ func TestCloneArgsGit(t *testing.T) {
 }

 func gitGetConfig(name string) string {
-    b, err := git([]string{"config", "--get", name}...)
+    b, err := gitRepo{}.gitWithinDir("", "config", "--get", name)
     if err != nil {
         // since we are interested in empty or non empty string,
         // we can safely ignore the err here.
@@ -170,9 +172,50 @@ func gitGetConfig(name string) string {
 }

 func TestCheckoutGit(t *testing.T) {
-    root, err := os.MkdirTemp("", "docker-build-git-checkout")
+    root := t.TempDir()
+
+    gitpath, err := exec.LookPath("git")
     assert.NilError(t, err)
-    defer os.RemoveAll(root)
+    gitversion, _ := exec.Command(gitpath, "version").CombinedOutput()
+    t.Logf("%s", gitversion) // E.g. "git version 2.30.2"
+
+    // Serve all repositories under root using the Smart HTTP protocol so
+    // they can be cloned. The Dumb HTTP protocol is incompatible with
+    // shallow cloning but we unconditionally shallow-clone submodules, and
+    // we explicitly disable the file protocol.
+    // (Another option would be to use `git daemon` and the Git protocol,
+    // but that listens on a fixed port number which is a recipe for
+    // disaster in CI. Funnily enough, `git daemon --port=0` works but there
+    // is no easy way to discover which port got picked!)
+
+    // Associate git-http-backend logs with the current (sub)test.
+    // Incompatible with parallel subtests.
+    currentSubtest := t
+    githttp := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        var logs bytes.Buffer
+        (&cgi.Handler{
+            Path: gitpath,
+            Args: []string{"http-backend"},
+            Dir:  root,
+            Env: []string{
+                "GIT_PROJECT_ROOT=" + root,
+                "GIT_HTTP_EXPORT_ALL=1",
+            },
+            Stderr: &logs,
+        }).ServeHTTP(w, r)
+        if logs.Len() == 0 {
+            return
+        }
+        for {
+            line, err := logs.ReadString('\n')
+            currentSubtest.Log("git-http-backend: " + line)
+            if err != nil {
+                break
+            }
+        }
+    })
+    server := httptest.NewServer(&githttp)
+    defer server.Close()

     autocrlf := gitGetConfig("core.autocrlf")
     if !(autocrlf == "true" || autocrlf == "false" ||
@@ -184,88 +227,54 @@ func TestCheckoutGit(t *testing.T) {
         eol = "\r\n"
     }

+    must := func(out []byte, err error) {
+        t.Helper()
+        if len(out) > 0 {
+            t.Logf("%s", out)
+        }
+        assert.NilError(t, err)
+    }
+
     gitDir := filepath.Join(root, "repo")
-    _, err = git("init", gitDir)
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com")
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test")
-    assert.NilError(t, err)
-
-    err = os.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644)
-    assert.NilError(t, err)
+    must(gitRepo{}.gitWithinDir(root, "-c", "init.defaultBranch=master", "init", gitDir))
+    must(gitRepo{}.gitWithinDir(gitDir, "config", "user.email", "test@docker.com"))
+    must(gitRepo{}.gitWithinDir(gitDir, "config", "user.name", "Docker test"))
+    assert.NilError(t, os.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644))

     subDir := filepath.Join(gitDir, "subdir")
     assert.NilError(t, os.Mkdir(subDir, 0755))
-
-    err = os.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644)
-    assert.NilError(t, err)
+    assert.NilError(t, os.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644))

     if runtime.GOOS != "windows" {
-        if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil {
-            t.Fatal(err)
-        }
-
-        if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil {
-            t.Fatal(err)
-        }
+        assert.NilError(t, os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")))
+        assert.NilError(t, os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")))
     }

-    _, err = gitWithinDir(gitDir, "add", "-A")
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(gitDir, "commit", "-am", "First commit")
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(gitDir, "checkout", "-b", "test")
-    assert.NilError(t, err)
-
-    err = os.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644)
-    assert.NilError(t, err)
-
-    err = os.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644)
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(gitDir, "add", "-A")
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit")
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(gitDir, "checkout", "master")
-    assert.NilError(t, err)
+    must(gitRepo{}.gitWithinDir(gitDir, "add", "-A"))
+    must(gitRepo{}.gitWithinDir(gitDir, "commit", "-am", "First commit"))
+    must(gitRepo{}.gitWithinDir(gitDir, "checkout", "-b", "test"))
+
+    assert.NilError(t, os.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644))
+    assert.NilError(t, os.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644))
+
+    must(gitRepo{}.gitWithinDir(gitDir, "add", "-A"))
+    must(gitRepo{}.gitWithinDir(gitDir, "commit", "-am", "Branch commit"))
+    must(gitRepo{}.gitWithinDir(gitDir, "checkout", "master"))

     // set up submodule
     subrepoDir := filepath.Join(root, "subrepo")
-    _, err = git("init", subrepoDir)
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(subrepoDir, "config", "user.email", "test@docker.com")
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(subrepoDir, "config", "user.name", "Docker test")
-    assert.NilError(t, err)
-
-    err = os.WriteFile(filepath.Join(subrepoDir, "subfile"), []byte("subcontents"), 0644)
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(subrepoDir, "add", "-A")
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(subrepoDir, "commit", "-am", "Subrepo initial")
-    assert.NilError(t, err)
-
-    cmd := exec.Command("git", "submodule", "add", subrepoDir, "sub") // this command doesn't work with --work-tree
-    cmd.Dir = gitDir
-    assert.NilError(t, cmd.Run())
-
-    _, err = gitWithinDir(gitDir, "add", "-A")
-    assert.NilError(t, err)
-
-    _, err = gitWithinDir(gitDir, "commit", "-am", "With submodule")
-    assert.NilError(t, err)
+    must(gitRepo{}.gitWithinDir(root, "-c", "init.defaultBranch=master", "init", subrepoDir))
+    must(gitRepo{}.gitWithinDir(subrepoDir, "config", "user.email", "test@docker.com"))
+    must(gitRepo{}.gitWithinDir(subrepoDir, "config", "user.name", "Docker test"))
+
+    assert.NilError(t, os.WriteFile(filepath.Join(subrepoDir, "subfile"), []byte("subcontents"), 0644))
+
+    must(gitRepo{}.gitWithinDir(subrepoDir, "add", "-A"))
+    must(gitRepo{}.gitWithinDir(subrepoDir, "commit", "-am", "Subrepo initial"))
+
+    must(gitRepo{}.gitWithinDir(gitDir, "submodule", "add", server.URL+"/subrepo", "sub"))
+    must(gitRepo{}.gitWithinDir(gitDir, "add", "-A"))
+    must(gitRepo{}.gitWithinDir(gitDir, "commit", "-am", "With submodule"))

     type singleCase struct {
         frag string
@@ -299,28 +308,31 @@ func TestCheckoutGit(t *testing.T) {
     }

     for _, c := range cases {
-        ref, subdir := getRefAndSubdir(c.frag)
-        r, err := cloneGitRepo(gitRepo{remote: gitDir, ref: ref, subdir: subdir})
-        if c.fail {
-            assert.Check(t, is.ErrorContains(err, ""))
-            continue
-        }
-        assert.NilError(t, err)
-        defer os.RemoveAll(r)
-        if c.submodule {
-            b, err := os.ReadFile(filepath.Join(r, "sub/subfile"))
-            assert.NilError(t, err)
-            assert.Check(t, is.Equal("subcontents", string(b)))
-        } else {
-            _, err := os.Stat(filepath.Join(r, "sub/subfile"))
-            assert.Assert(t, is.ErrorContains(err, ""))
-            assert.Assert(t, os.IsNotExist(err))
-        }
-
-        b, err := os.ReadFile(filepath.Join(r, "Dockerfile"))
-        assert.NilError(t, err)
-        assert.Check(t, is.Equal(c.exp, string(b)))
+        t.Run(c.frag, func(t *testing.T) {
+            currentSubtest = t
+            ref, subdir := getRefAndSubdir(c.frag)
+            r, err := gitRepo{remote: server.URL + "/repo", ref: ref, subdir: subdir}.clone()
+
+            if c.fail {
+                assert.Check(t, is.ErrorContains(err, ""))
+                return
+            }
+            assert.NilError(t, err)
+            defer os.RemoveAll(r)
+            if c.submodule {
+                b, err := os.ReadFile(filepath.Join(r, "sub/subfile"))
+                assert.NilError(t, err)
+                assert.Check(t, is.Equal("subcontents", string(b)))
+            } else {
+                _, err := os.Stat(filepath.Join(r, "sub/subfile"))
+                assert.Assert(t, is.ErrorContains(err, ""))
+                assert.Assert(t, os.IsNotExist(err))
+            }
+
+            b, err := os.ReadFile(filepath.Join(r, "Dockerfile"))
+            assert.NilError(t, err)
+            assert.Check(t, is.Equal(c.exp, string(b)))
+        })
     }
 }
@@ -80,10 +80,10 @@ func GetWithStatusError(address string) (resp *http.Response, err error) {
 // inspectResponse looks into the http response data at r to determine whether its
 // content-type is on the list of acceptable content types for remote build contexts.
 // This function returns:
 //   - a string representation of the detected content-type
 //   - an io.Reader for the response body
 //   - an error value which will be non-nil either when something goes wrong while
 //     reading bytes from r or when the detected content-type is not acceptable.
 func inspectResponse(ct string, r io.Reader, clen int64) (string, io.Reader, error) {
     plen := clen
     if plen <= 0 || plen > maxPreambleLength {
@@ -30,12 +30,11 @@ func IsURL(str string) bool {
 //
 // The following patterns are considered to be a Git URL:
 //
-//   - https://(.*).git(?:#.+)?$ git repository URL with optional fragment, as
-//     known to be used by GitHub and GitLab.
-//   - http://(.*).git(?:#.+)?$ same, but non-TLS
-//   - git://(.*) URLs using git:// scheme
-//   - git@(.*)
-//   - github.com/ see description below
+//   - https://(.*).git(?:#.+)?$ git repository URL with optional fragment, as known to be used by GitHub and GitLab.
+//   - http://(.*).git(?:#.+)?$ same, but non-TLS
+//   - git://(.*) URLs using git:// scheme
+//   - git@(.*)
+//   - github.com/ see description below
 //
 // The github.com/ prefix is a special case used to treat context-paths
 // starting with "github.com/" as a git URL if the given path does not
@@ -49,7 +48,6 @@ func IsURL(str string) bool {
 // path. Code using this function should check if the path exists locally before
 // using it as a URL.
 //
-// Fragments
+// # Fragments
 //
 // Git URLs accept context configuration in their fragment section, separated by
 // a colon (`:`). The first part represents the reference to check out, and can
@@ -74,7 +73,6 @@ func IsURL(str string) bool {
 // | my-repo.git#master:directory | refs/heads/master | /directory |
 // | my-repo.git#mytag:directory | refs/tags/my-tag | /directory |
 // | my-repo.git#mybranch:directory | refs/heads/my-branch | /directory |
-//
 func IsGitURL(str string) bool {
     if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) {
         return true
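A quick sketch of what the documented patterns accept, using the package from this diff (the expected results follow from the doc comment above):

    fmt.Println(urlutil.IsGitURL("https://github.com/moby/moby.git"))   // true
    fmt.Println(urlutil.IsGitURL("git@github.com:moby/moby.git"))       // true
    fmt.Println(urlutil.IsGitURL("github.com/moby/moby"))               // true (special-cased prefix)
    fmt.Println(urlutil.IsGitURL("https://example.com/archive.tar.gz")) // false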
@@ -4,7 +4,7 @@ Package client is a Go client for the Docker Engine API.
 For more information about the Engine API, see the documentation:
 https://docs.docker.com/engine/api/

-Usage
+# Usage

 You use the library by creating a client object and calling methods on it. The
 client can be created either from environment variables with NewClientWithOpts(client.FromEnv),
@@ -37,7 +37,6 @@ For example, to list running containers (the equivalent of "docker ps"):
             fmt.Printf("%s %s\n", container.ID[:10], container.Image)
         }
     }
-
 */
 package client // import "github.com/docker/docker/client"

@@ -121,12 +120,10 @@ func CheckRedirect(req *http.Request, via []*http.Request) error {
 // itself with values from environment variables (client.FromEnv), and has
 // automatic API version negotiation enabled (client.WithAPIVersionNegotiation()).
 //
-//
 //	cli, err := client.NewClientWithOpts(
 //		client.FromEnv,
 //		client.WithAPIVersionNegotiation(),
 //	)
-//
 func NewClientWithOpts(ops ...Opt) (*Client, error) {
     client, err := defaultHTTPClient(DefaultDockerHost)
     if err != nil {
@@ -22,7 +22,7 @@ import (
 // multiplexed.
 //
 // The format of the multiplexed stream is as follows:
 //
 //	[8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
 //
 // STREAM_TYPE can be 1 for stdout and 2 for stderr
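The doc comment above describes the raw attach/logs framing. A hedged sketch of demultiplexing it by hand (in practice the stdcopy package does this for you, but the header layout is simple: one type byte, three zero bytes, then a big-endian uint32 payload size):

    hdr := make([]byte, 8)
    for {
        if _, err := io.ReadFull(src, hdr); err != nil {
            break // io.EOF when the stream ends
        }
        streamType := hdr[0]                      // 1 = stdout, 2 = stderr
        size := binary.BigEndian.Uint32(hdr[4:8]) // SIZE1..SIZE4
        payload := make([]byte, size)
        if _, err := io.ReadFull(src, payload); err != nil {
            break
        }
        if streamType == 2 {
            os.Stderr.Write(payload)
        } else {
            os.Stdout.Write(payload)
        }
    }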
@@ -26,23 +26,25 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
     if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
         return response, err
     }

-    clientVersion := cli.ClientVersion()
-
-    // When using API 1.24 and under, the client is responsible for removing the container
-    if hostConfig != nil && versions.LessThan(clientVersion, "1.25") {
-        hostConfig.AutoRemove = false
-    }
-
-    // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize
-    if hostConfig != nil && platform != nil && platform.OS == "linux" && versions.LessThan(clientVersion, "1.42") {
-        hostConfig.ConsoleSize = [2]uint{0, 0}
-    }
-
     if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil {
         return response, err
     }

+    if hostConfig != nil {
+        if versions.LessThan(cli.ClientVersion(), "1.25") {
+            // When using API 1.24 and under, the client is responsible for removing the container
+            hostConfig.AutoRemove = false
+        }
+        if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") {
+            // KernelMemory was added in API 1.40, and deprecated in API 1.42
+            hostConfig.KernelMemory = 0
+        }
+        if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") {
+            // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize
+            hostConfig.ConsoleSize = [2]uint{0, 0}
+        }
+    }
+
     query := url.Values{}
     if p := formatPlatform(platform); p != "" {
         query.Set("platform", p)
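The refactor folds the per-version fixups under a single hostConfig check and adds the KernelMemory reset. The gating relies on the versions helpers, which compare API versions numerically rather than lexically; a small sketch of their behaviour:

    fmt.Println(versions.LessThan("1.24", "1.25"))             // true
    fmt.Println(versions.GreaterThanOrEqualTo("1.42", "1.42")) // true
    fmt.Println(versions.LessThan("1.9", "1.10"))              // true, unlike a plain string compare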
@@ -24,7 +24,7 @@ import (
 // multiplexed.
 //
 // The format of the multiplexed stream is as follows:
 //
 //	[8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT}
 //
 // STREAM_TYPE can be 1 for stdout and 2 for stderr
@@ -40,11 +40,11 @@ type notFound interface {
 // IsErrNotFound returns true if the error is a NotFound error, which is returned
 // by the API when some object is not found.
 func IsErrNotFound(err error) bool {
-    var e notFound
-    if errors.As(err, &e) {
+    if errdefs.IsNotFound(err) {
         return true
     }
-    return errdefs.IsNotFound(err)
+    var e notFound
+    return errors.As(err, &e)
 }

 type objectNotFoundError struct {
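The reordered check consults errdefs first and only then falls back to errors.As with the package's notFound interface; errors.As accepts a pointer to an interface and walks the wrap chain. A self-contained sketch of that fallback (missingErr is hypothetical, for illustration only):

    type notFound interface{ NotFound() }

    type missingErr struct{ ref string }

    func (e missingErr) Error() string { return "no such object: " + e.ref }
    func (e missingErr) NotFound()     {}

    func isNotFound(err error) bool {
        var e notFound
        return errors.As(err, &e)
    }

    // isNotFound(fmt.Errorf("lookup: %w", missingErr{"abc"})) == true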
@@ -58,22 +58,11 @@ func (e objectNotFoundError) Error() string {
     return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
 }

-// unauthorizedError represents an authorization error in a remote registry.
-type unauthorizedError struct {
-    cause error
-}
-
-// Error returns a string representation of an unauthorizedError
-func (u unauthorizedError) Error() string {
-    return u.cause.Error()
-}
-
 // IsErrUnauthorized returns true if the error is caused
 // when a remote registry authentication fails
+//
+// Deprecated: use errdefs.IsUnauthorized
 func IsErrUnauthorized(err error) bool {
-    if _, ok := err.(unauthorizedError); ok {
-        return ok
-    }
     return errdefs.IsUnauthorized(err)
 }

@@ -85,32 +74,12 @@ func (e pluginPermissionDenied) Error() string {
     return "Permission denied while installing plugin " + e.name
 }

-// IsErrPluginPermissionDenied returns true if the error is caused
-// when a user denies a plugin's permissions
-func IsErrPluginPermissionDenied(err error) bool {
-    _, ok := err.(pluginPermissionDenied)
-    return ok
-}
-
-type notImplementedError struct {
-    message string
-}
-
-func (e notImplementedError) Error() string {
-    return e.message
-}
-
-func (e notImplementedError) NotImplemented() bool {
-    return true
-}
-
 // IsErrNotImplemented returns true if the error is a NotImplemented error.
 // This is returned by the API when a requested feature has not been
 // implemented.
+//
+// Deprecated: use errdefs.IsNotImplemented
 func IsErrNotImplemented(err error) bool {
-    if _, ok := err.(notImplementedError); ok {
-        return ok
-    }
     return errdefs.IsNotImplemented(err)
 }

@@ -17,7 +17,6 @@ import (
 // be sent over the error channel. If an error is sent all processing will be stopped. It's up
 // to the caller to reopen the stream in the event of an error by reinvoking this method.
 func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) {
-
     messages := make(chan events.Message)
     errs := make(chan error, 1)

@@ -58,7 +58,6 @@ func TestEventsErrorFromServer(t *testing.T) {
 }

 func TestEvents(t *testing.T) {
-
     expectedURL := "/events"

     filters := filters.NewArgs()
@@ -34,6 +34,9 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions
     if options.All {
         query.Set("all", "1")
     }
+    if options.SharedSize && versions.GreaterThanOrEqualTo(cli.version, "1.42") {
+        query.Set("shared-size", "1")
+    }

     serverResp, err := cli.get(ctx, "/images/json", query, nil)
     defer ensureReaderClosed(serverResp)
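Caller-side, the new option only takes effect when the negotiated API version is 1.42 or newer; on older daemons the query parameter is silently omitted, as the new test further down verifies. A usage sketch (the SharedSize values on the summaries are only meaningful against 1.42+ daemons):

    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        panic(err)
    }
    images, err := cli.ImageList(context.Background(), types.ImageListOptions{SharedSize: true})
    if err != nil {
        panic(err)
    }
    for _, img := range images {
        fmt.Println(img.ID, img.SharedSize)
    }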
@@ -7,12 +7,15 @@ import (
     "fmt"
     "io"
     "net/http"
+    "net/url"
     "strings"
     "testing"

     "github.com/docker/docker/api/types"
     "github.com/docker/docker/api/types/filters"
     "github.com/docker/docker/errdefs"
+    "gotest.tools/v3/assert"
+    is "gotest.tools/v3/assert/cmp"
 )
@@ -158,3 +161,41 @@ func TestImageListApiBefore125(t *testing.T) {
         t.Fatalf("expected 2 images, got %v", images)
     }
 }
+
+// Checks if shared-size query parameter is set/not being set correctly
+// for /images/json.
+func TestImageListWithSharedSize(t *testing.T) {
+    t.Parallel()
+    const sharedSize = "shared-size"
+    for _, tc := range []struct {
+        name       string
+        version    string
+        options    types.ImageListOptions
+        sharedSize string // expected value for the shared-size query param, or empty if it should not be set.
+    }{
+        {name: "unset after 1.42, no options set", version: "1.42"},
+        {name: "set after 1.42, if requested", version: "1.42", options: types.ImageListOptions{SharedSize: true}, sharedSize: "1"},
+        {name: "unset before 1.42, even if requested", version: "1.41", options: types.ImageListOptions{SharedSize: true}},
+    } {
+        tc := tc
+        t.Run(tc.name, func(t *testing.T) {
+            t.Parallel()
+            var query url.Values
+            client := &Client{
+                client: newMockClient(func(req *http.Request) (*http.Response, error) {
+                    query = req.URL.Query()
+                    return &http.Response{
+                        StatusCode: http.StatusOK,
+                        Body:       io.NopCloser(strings.NewReader("[]")),
+                    }, nil
+                }),
+                version: tc.version,
+            }
+            _, err := client.ImageList(context.Background(), tc.options)
+            assert.Check(t, err)
+            expectedSet := tc.sharedSize != ""
+            assert.Check(t, is.Equal(query.Has(sharedSize), expectedSet))
+            assert.Check(t, is.Equal(query.Get(sharedSize), tc.sharedSize))
+        })
+    }
+}
@@ -44,13 +44,6 @@ func FromEnv(c *Client) error {
     return nil
 }

-// WithDialer applies the dialer.DialContext to the client transport. This can be
-// used to set the Timeout and KeepAlive settings of the client.
-// Deprecated: use WithDialContext
-func WithDialer(dialer *net.Dialer) Opt {
-    return WithDialContext(dialer.DialContext)
-}
-
 // WithDialContext applies the dialer to the client transport. This can be
 // used to set the Timeout and KeepAlive settings of the client.
 func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt {
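With WithDialer gone, callers pass the dialer's DialContext method to WithDialContext instead; the two were equivalent, as the removed implementation shows. Migration sketch:

    dialer := &net.Dialer{
        Timeout:   30 * time.Second,
        KeepAlive: 30 * time.Second,
    }
    cli, err := client.NewClientWithOpts(
        client.FromEnv,
        client.WithDialContext(dialer.DialContext), // was: client.WithDialer(dialer)
    )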
@@ -59,8 +59,8 @@ func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) error {
     flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers")

     flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API")
-    flags.IntVar(&conf.MaxConcurrentDownloads, "max-concurrent-downloads", conf.MaxConcurrentDownloads, "Set the max concurrent downloads for each pull")
-    flags.IntVar(&conf.MaxConcurrentUploads, "max-concurrent-uploads", conf.MaxConcurrentUploads, "Set the max concurrent uploads for each push")
+    flags.IntVar(&conf.MaxConcurrentDownloads, "max-concurrent-downloads", conf.MaxConcurrentDownloads, "Set the max concurrent downloads")
+    flags.IntVar(&conf.MaxConcurrentUploads, "max-concurrent-uploads", conf.MaxConcurrentUploads, "Set the max concurrent uploads")
     flags.IntVar(&conf.MaxDownloadAttempts, "max-download-attempts", conf.MaxDownloadAttempts, "Set the max download attempts for each pull")
     flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", conf.ShutdownTimeout, "Set the default shutdown timeout")

@@ -79,10 +79,9 @@ func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) error {

     // Deprecated flags / options

-    // "--graph" is "soft-deprecated" in favor of "data-root". This flag was added
-    // before Docker 1.0, so won't be removed, only hidden, to discourage its usage.
-    flags.StringVarP(&conf.Root, "graph", "g", conf.Root, "Root of the Docker runtime")
-    _ = flags.MarkHidden("graph")
+    //nolint:staticcheck // TODO(thaJeztah): remove in next release.
+    flags.StringVarP(&conf.RootDeprecated, "graph", "g", conf.RootDeprecated, "Root of the Docker runtime")
+    _ = flags.MarkDeprecated("graph", "Use --data-root instead")
     flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
     _ = flags.MarkDeprecated("restart", "Please use a restart policy on docker run")
     flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise")
@@ -323,7 +323,6 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e

 func (cli *DaemonCli) reloadConfig() {
     reload := func(c *config.Config) {
-
         // Revalidate and reload the authorization plugins
         if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil {
             logrus.Fatalf("Error validating authorization plugin: %v", err)
@@ -395,9 +394,6 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
 	conf.Hosts = opts.Hosts
 	conf.LogLevel = opts.LogLevel
 
-	if flags.Changed("graph") && flags.Changed("data-root") {
-		return nil, errors.New(`cannot specify both "--graph" and "--data-root" option`)
-	}
 	if flags.Changed(FlagTLS) {
 		conf.TLS = &opts.TLS
 	}
@@ -448,10 +444,6 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
 		return nil, err
 	}
 
-	if flags.Changed("graph") {
-		logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`)
-	}
-
 	// Check if duplicate label-keys with different values are found
 	newLabels, err := config.GetConflictFreeLabels(conf.Labels)
 	if err != nil {
@@ -608,7 +600,7 @@ func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error)
 	}
 
 	if !cli.Config.CriContainerd {
-		opts = append(opts, supervisor.WithPlugin("cri", nil))
+		opts = append(opts, supervisor.WithPlugin("io.containerd.grpc.v1.cri", nil))
 	}
 
 	return opts, nil
@@ -648,7 +640,7 @@ func newAPIServerConfig(config *config.Config) (*apiserver.Config, error) {
 
 // checkTLSAuthOK checks basically for an explicitly disabled TLS/TLSVerify
 // Going forward we do not want to support a scenario where dockerd listens
 // on TCP without either TLS client auth (or an explicit opt-in to disable it)
 func checkTLSAuthOK(c *config.Config) bool {
 	if c.TLS == nil {
 		// Either TLS is enabled by default, in which case TLS verification should be enabled by default, or explicitly disabled
@@ -4,6 +4,7 @@ import (
 	"net"
 	"net/http"
 	"strings"
+	"time"
 
 	metrics "github.com/docker/go-metrics"
 	"github.com/sirupsen/logrus"
@@ -24,7 +25,11 @@ func startMetricsServer(addr string) error {
 	mux.Handle("/metrics", metrics.Handler())
 	go func() {
 		logrus.Infof("metrics API listening on %s", l.Addr())
-		if err := http.Serve(l, mux); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
+		srv := &http.Server{
+			Handler:           mux,
+			ReadHeaderTimeout: 5 * time.Minute, // "G112: Potential Slowloris Attack (gosec)"; not a real concern for our use, so setting a long timeout.
+		}
+		if err := srv.Serve(l); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
 			logrus.WithError(err).Error("error serving metrics API")
 		}
 	}()
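The metrics hunk above replaces the bare `http.Serve` helper with an explicit `http.Server` so a `ReadHeaderTimeout` can be set, which is what gosec's G112 (Slowloris) warning asks for. A minimal, self-contained sketch of the same pattern follows; the address and handler here are illustrative only, not taken from the daemon:

```go
package main

import (
	"log"
	"net"
	"net/http"
	"time"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:9323") // hypothetical metrics address
	if err != nil {
		log.Fatal(err)
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	})
	// http.Serve(l, mux) offers no way to bound how long a client may take to
	// send its request headers; an http.Server value does, via ReadHeaderTimeout.
	srv := &http.Server{
		Handler:           mux,
		ReadHeaderTimeout: 5 * time.Minute,
	}
	if err := srv.Serve(l); err != nil && err != http.ErrServerClosed {
		log.Printf("error serving metrics API: %v", err)
	}
}
```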
@@ -10,7 +10,6 @@ import (
 	"os/exec"
 	"path/filepath"
 	"time"
-	"unsafe"
 
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/pflag"
@@ -27,9 +26,8 @@ var (
 	flUnregisterService *bool
 	flRunService        *bool
 
-	setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle")
-	oldStderr    windows.Handle
-	panicFile    *os.File
+	oldStderr windows.Handle
+	panicFile *os.File
 
 	service *handler
 )
@@ -188,35 +186,14 @@ func registerService() error {
 	}
 	defer s.Close()
 
-	// See http://stackoverflow.com/questions/35151052/how-do-i-configure-failure-actions-of-a-windows-service-written-in-go
-	const (
-		scActionNone       = 0
-		scActionRestart    = 1
-		scActionReboot     = 2
-		scActionRunCommand = 3
-
-		serviceConfigFailureActions = 2
-	)
-
-	type serviceFailureActions struct {
-		ResetPeriod  uint32
-		RebootMsg    *uint16
-		Command      *uint16
-		ActionsCount uint32
-		Actions      uintptr
-	}
-
-	type scAction struct {
-		Type  uint32
-		Delay uint32
-	}
-	t := []scAction{
-		{Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)},
-		{Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)},
-		{Type: scActionNone},
-	}
-	lpInfo := serviceFailureActions{ResetPeriod: uint32(24 * time.Hour / time.Second), ActionsCount: uint32(3), Actions: uintptr(unsafe.Pointer(&t[0]))}
-	err = windows.ChangeServiceConfig2(s.Handle, serviceConfigFailureActions, (*byte)(unsafe.Pointer(&lpInfo)))
+	err = s.SetRecoveryActions(
+		[]mgr.RecoveryAction{
+			{Type: mgr.ServiceRestart, Delay: 15 * time.Second},
+			{Type: mgr.ServiceRestart, Delay: 15 * time.Second},
+			{Type: mgr.NoAction},
+		},
+		uint32(24*time.Hour/time.Second),
+	)
 	if err != nil {
 		return err
 	}
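The hunk above drops the hand-rolled `SERVICE_FAILURE_ACTIONS` structs and the raw `ChangeServiceConfig2`/`unsafe.Pointer` call in favor of the typed `SetRecoveryActions` helper in `golang.org/x/sys/windows/svc/mgr`. A hedged sketch of the new call in isolation (Windows-only; the service name "docker" is assumed for illustration, and the program must run with administrator rights):

```go
//go:build windows

package main

import (
	"log"
	"time"

	"golang.org/x/sys/windows/svc/mgr"
)

func main() {
	m, err := mgr.Connect()
	if err != nil {
		log.Fatal(err)
	}
	defer m.Disconnect()

	s, err := m.OpenService("docker") // hypothetical service name
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	// Restart twice with a short delay, then give up; the failure counter
	// resets after the given period (in seconds).
	err = s.SetRecoveryActions(
		[]mgr.RecoveryAction{
			{Type: mgr.ServiceRestart, Delay: 15 * time.Second},
			{Type: mgr.ServiceRestart, Delay: 15 * time.Second},
			{Type: mgr.NoAction},
		},
		uint32(24*time.Hour/time.Second),
	)
	if err != nil {
		log.Fatal(err)
	}
}
```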
@@ -264,7 +241,8 @@ func initService(daemonCli *DaemonCli) (bool, bool, error) {
 		return false, false, nil
 	}
 
-	interactive, err := svc.IsAnInteractiveSession()
+	// Check if we're running as a Windows service or interactively.
+	isService, err := svc.IsWindowsService()
 	if err != nil {
 		return false, false, err
 	}
@@ -276,7 +254,7 @@ func initService(daemonCli *DaemonCli) (bool, bool, error) {
 	}
 
 	var log *eventlog.Log
-	if !interactive {
+	if isService {
 		log, err = eventlog.Open(*flServiceName)
 		if err != nil {
 			return false, false, err
@@ -288,10 +266,10 @@ func initService(daemonCli *DaemonCli) (bool, bool, error) {
 
 	service = h
 	go func() {
-		if interactive {
-			err = debug.Run(*flServiceName, h)
-		} else {
+		if isService {
 			err = svc.Run(*flServiceName, h)
+		} else {
+			err = debug.Run(*flServiceName, h)
 		}
 
 		h.fromsvc <- err
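These service hunks track the deprecation of `svc.IsAnInteractiveSession` in favor of `svc.IsWindowsService` in `golang.org/x/sys`. Note the check's sense is inverted — the code now asks "am I a service?" rather than "am I interactive?" — which is why the `svc.Run`/`debug.Run` branches trade places. A minimal sketch of the detection (Windows-only):

```go
//go:build windows

package main

import (
	"log"

	"golang.org/x/sys/windows/svc"
)

func main() {
	isService, err := svc.IsWindowsService()
	if err != nil {
		log.Fatal(err)
	}
	if isService {
		// Started by the service control manager: run under svc.Run
		// (and log to the Windows event log rather than the console).
		log.Print("running as a Windows service")
	} else {
		// Started from a console: run under debug.Run for interactive testing.
		log.Print("running interactively")
	}
}
```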
@@ -387,21 +365,19 @@ func initPanicFile(path string) error {
 	// Update STD_ERROR_HANDLE to point to the panic file so that Go writes to
 	// it when it panics. Remember the old stderr to restore it before removing
 	// the panic file.
-	sh := uint32(windows.STD_ERROR_HANDLE)
-	h, err := windows.GetStdHandle(sh)
+	h, err := windows.GetStdHandle(windows.STD_ERROR_HANDLE)
 	if err != nil {
 		return err
 	}
-
 	oldStderr = h
 
-	r, _, err := setStdHandle.Call(uintptr(sh), uintptr(panicFile.Fd()))
-	if r == 0 && err != nil {
+	err = windows.SetStdHandle(windows.STD_ERROR_HANDLE, windows.Handle(panicFile.Fd()))
+	if err != nil {
 		return err
 	}
 
 	// Reset os.Stderr to the panic file (so fmt.Fprintf(os.Stderr,...) actually gets redirected)
-	os.Stderr = os.NewFile(uintptr(panicFile.Fd()), "/dev/stderr")
+	os.Stderr = os.NewFile(panicFile.Fd(), "/dev/stderr")
 
 	// Force threads that panic to write to stderr (the panicFile handle now), otherwise it will go into the ether
 	log.SetOutput(os.Stderr)
@@ -412,8 +388,7 @@ func initPanicFile(path string) error {
 func removePanicFile() {
 	if st, err := panicFile.Stat(); err == nil {
 		if st.Size() == 0 {
-			sh := uint32(windows.STD_ERROR_HANDLE)
-			setStdHandle.Call(uintptr(sh), uintptr(oldStderr))
+			windows.SetStdHandle(windows.STD_ERROR_HANDLE, oldStderr)
 			panicFile.Close()
 			os.Remove(panicFile.Name())
 		}
@@ -14,14 +14,13 @@ import (
 // behavior expected from a vanilla unix command-line tool in general
 // (and the Docker engine in particular).
 //
-// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
-// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
+//   - If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated.
+//   - If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is
 //     skipped and the process is terminated immediately (allows force quit of stuck daemon)
-// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
-// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while
+//   - A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit.
+//   - Ignore SIGPIPE events. These are generated by systemd when journald is restarted while
 //     the docker daemon is not restarted and also running under systemd.
 // Fixes https://github.com/docker/docker/issues/19728
-//
 func Trap(cleanup func(), logger interface {
 	Info(args ...interface{})
 }) {
@@ -62,5 +62,4 @@ func TestTrap(t *testing.T) {
 			}
 		})
 	}
-
 }
@@ -81,17 +81,18 @@ type Container struct {
 	Driver                 string
 	OS                     string
 	// MountLabel contains the options for the 'mount' command
 	MountLabel             string
 	ProcessLabel           string
 	RestartCount           int
 	HasBeenStartedBefore   bool
 	HasBeenManuallyStopped bool // used for unless-stopped restart policy
+	HasBeenManuallyRestarted bool `json:"-"` // used to distinguish restart caused by restart policy from the manual one
 	MountPoints            map[string]*volumemounts.MountPoint
 	HostConfig             *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable
 	ExecCommands           *exec.Store                `json:"-"`
 	DependencyStore        agentexec.DependencyGetter `json:"-"`
 	SecretReferences       []*swarmtypes.SecretReference
 	ConfigReferences       []*swarmtypes.ConfigReference
 	// logDriver for closing
 	LogDriver logger.Logger  `json:"-"`
 	LogCopier *logger.Copier `json:"-"`
@@ -295,10 +296,11 @@ func (container *Container) SetupWorkingDirectory(rootIdentity idtools.Identity)
 // particular path inside the container as though you were a process in that
 // container.
 //
-// NOTE: The returned path is *only* safely scoped inside the container's BaseFS
+// # NOTE
+// The returned path is *only* safely scoped inside the container's BaseFS
 // if no component of the returned path changes (such as a component
 // symlinking to a different path) between using this method and using the
 // path. See symlink.FollowSymlinkInScope for more details.
 func (container *Container) GetResourcePath(path string) (string, error) {
 	if container.BaseFS == nil {
 		return "", errors.New("GetResourcePath: BaseFS of container " + container.ID + " is unexpectedly nil")
@@ -324,10 +326,11 @@ func (container *Container) GetResourcePath(path string) (string, error) {
 // Only use this method to safely access the container's `container.json` or
 // other metadata files. If in doubt, use container.GetResourcePath.
 //
-// NOTE: The returned path is *only* safely scoped inside the container's root
+// # NOTE
+// The returned path is *only* safely scoped inside the container's root
 // if no component of the returned path changes (such as a component
 // symlinking to a different path) between using this method and using the
 // path. See symlink.FollowSymlinkInScope for more details.
 func (container *Container) GetRootResourcePath(path string) (string, error) {
 	// IMPORTANT - These are paths on the OS where the daemon is running, hence
 	// any filepath operations must be done in an OS agnostic way.
@@ -32,9 +32,10 @@ type State struct {
 	StartedAt  time.Time
 	FinishedAt time.Time
 	Health     *Health
+	Removed    bool `json:"-"`
 
-	waitStop   chan struct{}
-	waitRemove chan struct{}
+	stopWaiters       []chan<- StateStatus
+	removeOnlyWaiters []chan<- StateStatus
 }
 
 // StateStatus is used to return container wait results.
@@ -57,12 +58,9 @@ func (s StateStatus) Err() error {
 	return s.err
 }
 
-// NewState creates a default state object with a fresh channel for state changes.
+// NewState creates a default state object.
 func NewState() *State {
-	return &State{
-		waitStop:   make(chan struct{}),
-		waitRemove: make(chan struct{}),
-	}
+	return &State{}
 }
 
 // String returns a human-readable description of the state
@@ -182,11 +180,10 @@ func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateS
 	s.Lock()
 	defer s.Unlock()
 
-	if condition == WaitConditionNotRunning && !s.Running {
-		// Buffer so we can put it in the channel now.
-		resultC := make(chan StateStatus, 1)
+	// Buffer so we can put status and finish even nobody receives it.
+	resultC := make(chan StateStatus, 1)
 
-		// Send the current status.
+	if s.conditionAlreadyMet(condition) {
 		resultC <- StateStatus{
 			exitCode: s.ExitCode(),
 			err:      s.Err(),
@@ -195,20 +192,17 @@ func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateS
 		return resultC
 	}
 
-	// If we are waiting only for removal, the waitStop channel should
-	// remain nil and block forever.
-	var waitStop chan struct{}
-	if condition < WaitConditionRemoved {
-		waitStop = s.waitStop
-	}
+	waitC := make(chan StateStatus, 1)
 
-	// Always wait for removal, just in case the container gets removed
-	// while it is still in a "created" state, in which case it is never
-	// actually stopped.
-	waitRemove := s.waitRemove
-
-	resultC := make(chan StateStatus, 1)
+	// Removal wakes up both removeOnlyWaiters and stopWaiters
+	// Container could be removed while still in "created" state
+	// in which case it is never actually stopped
+	if condition == WaitConditionRemoved {
+		s.removeOnlyWaiters = append(s.removeOnlyWaiters, waitC)
+	} else {
+		s.stopWaiters = append(s.stopWaiters, waitC)
+	}
 
 	go func() {
 		select {
 		case <-ctx.Done():
@@ -218,23 +212,25 @@ func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateS
 				err:      ctx.Err(),
 			}
 			return
-		case <-waitStop:
-		case <-waitRemove:
-		}
-
-		s.Lock()
-		result := StateStatus{
-			exitCode: s.ExitCode(),
-			err:      s.Err(),
+		case status := <-waitC:
+			resultC <- status
 		}
-		s.Unlock()
-
-		resultC <- result
 	}()
 
 	return resultC
 }
 
+func (s *State) conditionAlreadyMet(condition WaitCondition) bool {
+	switch condition {
+	case WaitConditionNotRunning:
+		return !s.Running
+	case WaitConditionRemoved:
+		return s.Removed
+	}
+
+	return false
+}
+
 // IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
 func (s *State) IsRunning() bool {
 	s.Lock()
@@ -292,8 +288,8 @@ func (s *State) SetStopped(exitStatus *ExitStatus) {
 	}
 	s.ExitCodeValue = exitStatus.ExitCode
 	s.OOMKilled = exitStatus.OOMKilled
-	close(s.waitStop) // fire waiters for stop
-	s.waitStop = make(chan struct{})
+	s.notifyAndClear(&s.stopWaiters)
 }
 
 // SetRestarting sets the container state to "restarting" without locking.
@@ -308,8 +304,8 @@ func (s *State) SetRestarting(exitStatus *ExitStatus) {
 	s.FinishedAt = time.Now().UTC()
 	s.ExitCodeValue = exitStatus.ExitCode
 	s.OOMKilled = exitStatus.OOMKilled
-	close(s.waitStop) // fire waiters for stop
-	s.waitStop = make(chan struct{})
+	s.notifyAndClear(&s.stopWaiters)
 }
 
 // SetError sets the container's error state. This is useful when we want to
@@ -374,22 +370,19 @@ func (s *State) IsDead() bool {
 	return res
 }
 
-// SetRemoved assumes this container is already in the "dead" state and
-// closes the internal waitRemove channel to unblock callers waiting for a
-// container to be removed.
+// SetRemoved assumes this container is already in the "dead" state and notifies all waiters.
 func (s *State) SetRemoved() {
 	s.SetRemovalError(nil)
 }
 
 // SetRemovalError is to be called in case a container remove failed.
-// It sets an error and closes the internal waitRemove channel to unblock
-// callers waiting for the container to be removed.
+// It sets an error and notifies all waiters.
 func (s *State) SetRemovalError(err error) {
 	s.SetError(err)
 	s.Lock()
-	close(s.waitRemove) // Unblock those waiting on remove.
-	// Recreate the channel so next ContainerWait will work
-	s.waitRemove = make(chan struct{})
+	s.Removed = true
+	s.notifyAndClear(&s.removeOnlyWaiters)
+	s.notifyAndClear(&s.stopWaiters)
 	s.Unlock()
 }
 
@@ -400,3 +393,15 @@ func (s *State) Err() error {
 	}
 	return nil
 }
+
+func (s *State) notifyAndClear(waiters *[]chan<- StateStatus) {
+	result := StateStatus{
+		exitCode: s.ExitCodeValue,
+		err:      s.Err(),
+	}
+
+	for _, c := range *waiters {
+		c <- result
+	}
+	*waiters = nil
+}
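Taken together, the state hunks above replace the old close-and-recreate `waitStop`/`waitRemove` channels with per-caller buffered channels that are notified once and then dropped. Because each waiter owns its buffered channel, the status captured at notification time cannot be lost to a racing re-`make` of a shared channel — the bug behind issue #39352. A self-contained sketch of the pattern, with hypothetical names, outside the daemon:

```go
package main

import (
	"fmt"
	"sync"
)

// notifier hands each waiter its own buffered channel, so a notification
// carries the state observed at notify time even if the waiter reads late.
type notifier struct {
	mu      sync.Mutex
	waiters []chan<- string
}

func (n *notifier) wait() <-chan string {
	c := make(chan string, 1) // buffered: the send never blocks and the value is never lost
	n.mu.Lock()
	n.waiters = append(n.waiters, c)
	n.mu.Unlock()
	return c
}

func (n *notifier) notifyAndClear(status string) {
	n.mu.Lock()
	defer n.mu.Unlock()
	for _, c := range n.waiters {
		c <- status
	}
	n.waiters = nil // one-shot: later notifications go to later waiters only
}

func main() {
	var n notifier
	w := n.wait()
	n.notifyAndClear("stopped(exit=10)")
	n.notifyAndClear("running") // w is not re-notified; it already fired
	fmt.Println(<-w)            // prints the first status: stopped(exit=10)
}
```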
@@ -169,6 +169,31 @@ func TestStateTimeoutWait(t *testing.T) {
 	}
 }
 
+// Related issue: #39352
+func TestCorrectStateWaitResultAfterRestart(t *testing.T) {
+	s := NewState()
+
+	s.Lock()
+	s.SetRunning(0, true)
+	s.Unlock()
+
+	waitC := s.Wait(context.Background(), WaitConditionNotRunning)
+	want := ExitStatus{ExitCode: 10, ExitedAt: time.Now()}
+
+	s.Lock()
+	s.SetRestarting(&want)
+	s.Unlock()
+
+	s.Lock()
+	s.SetRunning(0, true)
+	s.Unlock()
+
+	got := <-waitC
+	if got.exitCode != want.ExitCode {
+		t.Fatalf("expected exit code %v, got %v", want.ExitCode, got.exitCode)
+	}
+}
+
 func TestIsValidStateString(t *testing.T) {
 	states := []struct {
 		state string
@@ -9,7 +9,7 @@
 # To build: docker build -t busybox .
 # To publish: Needs someone with publishing rights
 ARG WINDOWS_BASE_IMAGE=mcr.microsoft.com/windows/servercore
-ARG WINDOWS_BASE_IMAGE_TAG=ltsc2019
+ARG WINDOWS_BASE_IMAGE_TAG=ltsc2022
 ARG BUSYBOX_VERSION=FRP-3329-gcf0fa4d13
 
 # Checksum taken from https://frippery.org/files/busybox/SHA256SUM
@@ -19,7 +19,7 @@ FROM ${WINDOWS_BASE_IMAGE}:${WINDOWS_BASE_IMAGE_TAG}
 RUN mkdir C:\tmp && mkdir C:\bin
 ARG BUSYBOX_VERSION
 ARG BUSYBOX_SHA256SUM
-ADD https://frippery.org/files/busybox/busybox-w32-${BUSYBOX_VERSION}.exe /bin/busybox.exe
+ADD https://github.com/moby/busybox/releases/download/${BUSYBOX_VERSION}/busybox-w32-${BUSYBOX_VERSION}.exe /bin/busybox.exe
 RUN powershell \
     if ((Get-FileHash -Path /bin/busybox.exe -Algorithm SHA256).Hash -ne $Env:BUSYBOX_SHA256SUM) { \
         Throw \"Checksum validation failed\" \
@@ -354,24 +354,24 @@ install_nonsystemd() {
 
 cli_ctx_exists() {
 	name="$1"
-	"${BIN}/docker" context inspect -f "{{.Name}}" "${name}" > /dev/null 2>&1
+	"${BIN}/docker" --context=default context inspect -f "{{.Name}}" "${name}" > /dev/null 2>&1
 }
 
 cli_ctx_create() {
 	name="$1"
 	host="$2"
 	description="$3"
-	"${BIN}/docker" context create "${name}" --docker "host=${host}" --description "${description}" > /dev/null
+	"${BIN}/docker" --context=default context create "${name}" --docker "host=${host}" --description "${description}" > /dev/null
 }
 
 cli_ctx_use() {
 	name="$1"
-	"${BIN}/docker" context use "${name}" > /dev/null
+	"${BIN}/docker" --context=default context use "${name}" > /dev/null
 }
 
 cli_ctx_rm() {
 	name="$1"
-	"${BIN}/docker" context rm -f "${name}" > /dev/null
+	"${BIN}/docker" --context=default context rm -f "${name}" > /dev/null
 }
 
 # CLI subcommand: "install"
@@ -430,7 +430,12 @@ cmd_entrypoint_uninstall() {
 		cli_ctx_rm "${CLI_CONTEXT}"
 		INFO "Deleted CLI context \"${CLI_CONTEXT}\""
 	fi
+	unset DOCKER_HOST
+	unset DOCKER_CONTEXT
+	cli_ctx_use "default"
+	INFO 'Configured CLI to use the "default" context.'
+	INFO
+	INFO 'Make sure to unset or update the PATH, DOCKER_HOST, and DOCKER_CONTEXT environment variables if you have added them to `~/.bashrc`.'
 	INFO "This uninstallation tool does NOT remove Docker binaries and data."
 	INFO "To remove data, run: \`$BIN/rootlesskit rm -rf $HOME/.local/share/docker\`"
 }
@@ -261,6 +261,10 @@ get_target_arch() {
 	echo amd64
 }
 
+get_target_variant() {
+	echo "${TARGETVARIANT:-}"
+}
+
 while [ $# -gt 0 ]; do
 	imageTag="$1"
 	shift
@@ -311,11 +315,13 @@ while [ $# -gt 0 ]; do
 
 			found=""
 			targetArch="$(get_target_arch)"
+			targetVariant="$(get_target_variant)"
 			# parse first level multi-arch manifest
 			for i in "${!layers[@]}"; do
 				layerMeta="${layers[$i]}"
 				maniArch="$(echo "$layerMeta" | jq --raw-output '.platform.architecture')"
-				if [ "$maniArch" = "${targetArch}" ]; then
+				maniVariant="$(echo "$layerMeta" | jq --raw-output '.platform.variant')"
+				if [[ "$maniArch" = "${targetArch}" ]] && [[ -z "${targetVariant}" || "$maniVariant" = "${targetVariant}" ]]; then
 					digest="$(echo "$layerMeta" | jq --raw-output '.digest')"
 					# get second level single manifest
 					submanifestJson="$(
@@ -332,7 +338,7 @@ while [ $# -gt 0 ]; do
 				fi
 			done
 			if [ -z "$found" ]; then
-				echo >&2 "error: manifest for $maniArch is not found"
+				echo >&2 "error: manifest for ${targetArch}${targetVariant:+/${targetVariant}} is not found"
 				exit 1
 			fi
 			;;
@@ -8,5 +8,5 @@ import (
 func main() {
 	fs := http.FileServer(http.Dir("/static"))
 	http.Handle("/", fs)
-	log.Panic(http.ListenAndServe(":80", nil))
+	log.Panic(http.ListenAndServe(":80", nil)) // #nosec G114 -- Ignoring for test-code: G114: Use of net/http serve function that has no support for setting timeouts (gosec)
 }
@@ -131,7 +131,7 @@ case "$1" in
 	restart)
 		check_init
 		fail_unless_root
-		docker_pid=$(cat "$DOCKER_SSD_PIDFILE" 2> /dev/null)
+		docker_pid=$(cat "$DOCKER_SSD_PIDFILE" 2> /dev/null || true)
 		[ -n "$docker_pid" ] \
 			&& ps -p $docker_pid > /dev/null 2>&1 \
 			&& $0 stop
@@ -12,9 +12,9 @@ import (
 	"github.com/docker/distribution/reference"
 	enginetypes "github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm/runtime"
-	"github.com/docker/docker/pkg/pubsub"
 	"github.com/docker/docker/plugin"
 	v2 "github.com/docker/docker/plugin/v2"
+	"github.com/moby/pubsub"
 	"github.com/sirupsen/logrus"
 )
@@ -222,7 +222,6 @@ func configReferencesToGRPC(sr []*types.ConfigReference) ([]*swarmapi.ConfigRefe
 func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigReference {
 	refs := make([]*types.ConfigReference, 0, len(sr))
 	for _, s := range sr {
-
 		r := &types.ConfigReference{
 			ConfigID:   s.ConfigID,
 			ConfigName: s.ConfigName,
@@ -120,7 +120,6 @@ func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint {
 				NetworkID: v.NetworkID,
 				Addr:      v.Addr})
 		}
-
 	}
 
 	return endpoint
@@ -96,7 +96,6 @@ func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) (*types.ServiceSpec, error)
 	for _, n := range spec.Networks {
 		netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts}
 		serviceNetworks = append(serviceNetworks, netConfig)
-
 	}
 
 	taskTemplate, err := taskSpecFromGRPC(spec.Task)
@@ -169,7 +168,6 @@ func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) {
 	for _, n := range s.TaskTemplate.Networks {
 		netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts}
 		taskNetworks = append(taskNetworks, netConfig)
-
 	}
 
 	spec := swarmapi.ServiceSpec{
@@ -473,7 +471,6 @@ func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirem
 			MemoryBytes: res.Reservations.MemoryBytes,
 			Generic:     GenericResourcesToGRPC(res.Reservations.GenericResources),
 		}
-
 		}
 	}
 	return reqs
@@ -536,7 +533,6 @@ func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error
 		}
 		if p.MaxAttempts != nil {
 			rp.MaxAttempts = *p.MaxAttempts
-
 		}
 	}
 	return rp, nil
@@ -455,7 +455,6 @@ func (c *containerAdapter) createVolumes(ctx context.Context) error {
 			// It returns an error if the driver name is different - that is a valid error
 			return err
 		}
-
 	}
 
 	return nil
@@ -257,7 +257,7 @@ func (c *containerConfig) labels() map[string]string {
 func (c *containerConfig) mounts(deps exec.VolumeGetter) []enginemount.Mount {
 	var r []enginemount.Mount
 	for _, mount := range c.spec().Mounts {
-		if mount.Type == api.MountTypeCSI {
+		if mount.Type == api.MountTypeCluster {
 			r = append(r, c.convertCSIMount(mount, deps))
 		} else {
 			r = append(r, convertMount(mount))
@@ -308,7 +308,7 @@ func convertMount(m api.Mount) enginemount.Mount {
 		mount.Type = enginemount.TypeTmpfs
 	case api.MountTypeNamedPipe:
 		mount.Type = enginemount.TypeNamedPipe
-	case api.MountTypeCSI:
+	case api.MountTypeCluster:
 		mount.Type = enginemount.TypeCluster
 	}
 
@@ -16,7 +16,6 @@ import (
 )
 
 func TestHealthStates(t *testing.T) {
-
 	// set up environment: events, task, container ....
 	e := events.New()
 	_, l, _ := e.Subscribe()
@@ -37,7 +37,7 @@ func validateMounts(mounts []api.Mount) error {
 			if mount.Source == "" {
 				return errors.New("invalid npipe source, source must not be empty")
 			}
-		case api.MountTypeCSI:
+		case api.MountTypeCluster:
 			// nothing to do here.
 		default:
 			return fmt.Errorf("invalid mount type: %s", mount.Type)
@@ -205,9 +205,9 @@ func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) {
 }
 
 // resolveInputIPAddr tries to resolve the IP address from the string passed as input
-// - tries to match the string as an interface name, if so returns the IP address associated with it
-// - on failure of previous step tries to parse the string as an IP address itself
-//   if succeeds returns the IP address
+//   - tries to match the string as an interface name, if so returns the IP address associated with it
+//   - on failure of previous step tries to parse the string as an IP address itself
+//     if succeeds returns the IP address
 func resolveInputIPAddr(input string, isUnspecifiedValid bool) (net.IP, error) {
 	// Try to see if it is an interface name
 	interfaceAddr, err := resolveInterfaceAddr(input)
@@ -247,14 +247,11 @@ func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRe
 			logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
 			// warning in the client response should be concise
 			resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image))
-
 		} else if ctnr.Image != digestImage {
 			logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
 			ctnr.Image = digestImage
-
 		} else {
 			logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image)
-
 		}
 
 		// Replace the context with a fresh one.
@@ -286,7 +283,6 @@ func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec typ
 	var resp *apitypes.ServiceUpdateResponse
 
 	err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
-
 		err := c.populateNetworkID(ctx, state.controlClient, &spec)
 		if err != nil {
 			return err
@@ -2,7 +2,6 @@ package config
 
 import (
 	"encoding/json"
-	"fmt"
 	"sort"
 	"strings"
 
@@ -28,7 +27,7 @@ func (x *BuilderGCFilter) MarshalJSON() ([]byte, error) {
 	for _, k := range keys {
 		values := f.Get(k)
 		for _, v := range values {
-			arr = append(arr, fmt.Sprintf("%s=%s", k, v))
+			arr = append(arr, k+"="+v)
 		}
 	}
 	return json.Marshal(arr)
@@ -45,9 +44,9 @@ func (x *BuilderGCFilter) UnmarshalJSON(data []byte) error {
 		return err
 	}
 	for _, s := range arr {
-		fields := strings.SplitN(s, "=", 2)
-		name := strings.ToLower(strings.TrimSpace(fields[0]))
-		value := strings.TrimSpace(fields[1])
+		name, value, _ := strings.Cut(s, "=")
+		name = strings.ToLower(strings.TrimSpace(name))
+		value = strings.TrimSpace(value)
 		f.Add(name, value)
 	}
 	*x = BuilderGCFilter(f)
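The `strings.Cut` rewrite above is what fixes the panic in issue #44361: `SplitN(s, "=", 2)` returns a one-element slice when the separator is missing, so the old `fields[1]` indexed out of range, while `Cut` always returns both halves plus an `ok` flag. A small sketch of the difference (Go 1.18+):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, s := range []string{"unused-for=2200h", "unused-for2200h"} {
		// strings.Cut never panics: on a missing separator it returns
		// (s, "", false) instead of a short slice.
		name, value, ok := strings.Cut(s, "=")
		fmt.Printf("%-20q -> name=%q value=%q ok=%v\n", s, name, value, ok)

		fields := strings.SplitN(s, "=", 2)
		if len(fields) < 2 {
			// The old code indexed fields[1] unconditionally, which is
			// exactly the out-of-range panic the regression test covers.
			fmt.Println("SplitN produced a single field; fields[1] would panic")
		}
	}
}
```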
@@ -1,6 +1,7 @@
 package config
 
 import (
+	"encoding/json"
 	"testing"
 
 	"github.com/docker/docker/api/types/filters"
@@ -42,3 +43,16 @@ func TestBuilderGC(t *testing.T) {
 	assert.Assert(t, filters.Args(cfg.Builder.GC.Policy[0].Filter).UniqueExactMatch("unused-for", "2200h"))
 	assert.Assert(t, filters.Args(cfg.Builder.GC.Policy[1].Filter).UniqueExactMatch("unused-for", "3300h"))
 }
+
+// TestBuilderGCFilterUnmarshal is a regression test for https://github.com/moby/moby/issues/44361,
+// where an incorrectly formatted gc filter option ("unused-for2200h",
+// missing a "=" separator) resulted in a panic during unmarshal.
+func TestBuilderGCFilterUnmarshal(t *testing.T) {
+	var cfg BuilderGCConfig
+	err := json.Unmarshal([]byte(`{"poliCy": [{"keepStorage": "10GB", "filter": ["unused-for2200h"]}]}`), &cfg)
+	assert.Check(t, err)
+	expectedPolicy := []BuilderGCRule{{
+		KeepStorage: "10GB", Filter: BuilderGCFilter(filters.NewArgs(filters.Arg("unused-for2200h", ""))),
+	}}
+	assert.DeepEqual(t, cfg.Policy, expectedPolicy, cmp.AllowUnexported(BuilderGCFilter{}))
+}
@@ -7,9 +7,11 @@ import (
 	"net"
 	"net/url"
 	"os"
+	"path/filepath"
 	"strings"
 	"sync"
 
+	"github.com/containerd/containerd/runtime/v2/shim"
 	"github.com/docker/docker/opts"
 	"github.com/docker/docker/pkg/authorization"
 	"github.com/docker/docker/registry"
@@ -22,11 +24,11 @@ import (
 const (
 	// DefaultMaxConcurrentDownloads is the default value for
 	// maximum number of downloads that
-	// may take place at a time for each pull.
+	// may take place at a time.
 	DefaultMaxConcurrentDownloads = 3
 	// DefaultMaxConcurrentUploads is the default value for
 	// maximum number of uploads that
-	// may take place at a time for each push.
+	// may take place at a time.
 	DefaultMaxConcurrentUploads = 5
 	// DefaultDownloadAttempts is the default value for
 	// maximum number of attempts that
@@ -157,7 +159,7 @@ type CommonConfig struct {
 	NetworkDiagnosticPort int    `json:"network-diagnostic-port,omitempty"`
 	Pidfile               string `json:"pidfile,omitempty"`
 	RawLogs               bool   `json:"raw-logs,omitempty"`
-	RootDeprecated        string `json:"graph,omitempty"`
+	RootDeprecated        string `json:"graph,omitempty"` // Deprecated: use Root instead. TODO(thaJeztah): remove in next release.
 	Root                  string `json:"data-root,omitempty"`
 	ExecRoot              string `json:"exec-root,omitempty"`
 	SocketGroup           string `json:"group,omitempty"`
@@ -239,7 +241,7 @@ type CommonConfig struct {
 
 	DNSConfig
 	LogConfig
-	BridgeConfig // bridgeConfig holds bridge network specific configuration.
+	BridgeConfig // BridgeConfig holds bridge network specific configuration.
 	NetworkConfig
 	registry.ServiceOptions
 
@@ -321,19 +323,19 @@ func New() *Config {
 func GetConflictFreeLabels(labels []string) ([]string, error) {
 	labelMap := map[string]string{}
 	for _, label := range labels {
-		stringSlice := strings.SplitN(label, "=", 2)
-		if len(stringSlice) > 1 {
+		key, val, ok := strings.Cut(label, "=")
+		if ok {
 			// If there is a conflict we will return an error
-			if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] {
-				return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v)
+			if v, ok := labelMap[key]; ok && v != val {
+				return nil, errors.Errorf("conflict labels for %s=%s and %s=%s", key, val, key, v)
 			}
-			labelMap[stringSlice[0]] = stringSlice[1]
+			labelMap[key] = val
 		}
 	}
 
 	newLabels := []string{}
 	for k, v := range labelMap {
-		newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v))
+		newLabels = append(newLabels, k+"="+v)
 	}
 	return newLabels, nil
 }
@@ -479,16 +481,6 @@ func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Con
 		return nil, err
 	}
 
-	if config.RootDeprecated != "" {
-		logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`)
-
-		if config.Root != "" {
-			return nil, errors.New(`cannot specify both "graph" and "data-root" config file options`)
-		}
-
-		config.Root = config.RootDeprecated
-	}
-
 	return &config, nil
 }
 
@@ -536,7 +528,7 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag
 		for key := range unknownKeys {
 			unknown = append(unknown, key)
 		}
-		return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
+		return errors.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", "))
 	}
 
 	var conflicts []string
@@ -570,7 +562,7 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag
 	flags.Visit(duplicatedConflicts)
 
 	if len(conflicts) > 0 {
-		return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
+		return errors.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", "))
 	}
 	return nil
 }
@@ -579,10 +571,15 @@ func findConfigurationConflicts(config map[string]interface{}, flags *pflag.Flag
 // such as config.DNS, config.Labels, config.DNSSearch,
 // as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads and config.MaxDownloadAttempts.
 func Validate(config *Config) error {
+	//nolint:staticcheck // TODO(thaJeztah): remove in next release.
+	if config.RootDeprecated != "" {
+		return errors.New(`the "graph" config file option is deprecated; use "data-root" instead`)
+	}
+
 	// validate log-level
 	if config.LogLevel != "" {
 		if _, err := logrus.ParseLevel(config.LogLevel); err != nil {
-			return fmt.Errorf("invalid logging level: %s", config.LogLevel)
+			return errors.Errorf("invalid logging level: %s", config.LogLevel)
 		}
 	}
 
@@ -609,22 +606,22 @@ func Validate(config *Config) error {
 
 	// TODO(thaJeztah) Validations below should not accept "0" to be valid; see Validate() for a more in-depth description of this problem
 	if config.Mtu < 0 {
-		return fmt.Errorf("invalid default MTU: %d", config.Mtu)
+		return errors.Errorf("invalid default MTU: %d", config.Mtu)
 	}
 	if config.MaxConcurrentDownloads < 0 {
-		return fmt.Errorf("invalid max concurrent downloads: %d", config.MaxConcurrentDownloads)
+		return errors.Errorf("invalid max concurrent downloads: %d", config.MaxConcurrentDownloads)
 	}
 	if config.MaxConcurrentUploads < 0 {
-		return fmt.Errorf("invalid max concurrent uploads: %d", config.MaxConcurrentUploads)
+		return errors.Errorf("invalid max concurrent uploads: %d", config.MaxConcurrentUploads)
 	}
 	if config.MaxDownloadAttempts < 0 {
-		return fmt.Errorf("invalid max download attempts: %d", config.MaxDownloadAttempts)
+		return errors.Errorf("invalid max download attempts: %d", config.MaxDownloadAttempts)
 	}
 
 	// validate that "default" runtime is not reset
 	if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 {
 		if _, ok := runtimes[StockRuntimeName]; ok {
-			return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName)
+			return errors.Errorf("runtime name '%s' is reserved", StockRuntimeName)
 		}
 	}
 
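Throughout this file `fmt.Errorf` is swapped for `errors.Errorf`, which — given the name — appears to be github.com/pkg/errors (the standard library's errors package has no Errorf). It behaves like `fmt.Errorf` but also records a stack trace at the call site, visible with `%+v`. A quick sketch of that difference:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

func validateMTU(mtu int) error {
	if mtu < 0 {
		// Behaves like fmt.Errorf, but records the call stack as well.
		return errors.Errorf("invalid default MTU: %d", mtu)
	}
	return nil
}

func main() {
	err := validateMTU(-1)
	fmt.Printf("%v\n", err)  // just the message
	fmt.Printf("%+v\n", err) // message plus the recorded stack trace
}
```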
@@ -635,8 +632,8 @@ func Validate(config *Config) error {
 	if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" {
 		if !builtinRuntimes[defaultRuntime] {
 			runtimes := config.GetAllRuntimes()
-			if _, ok := runtimes[defaultRuntime]; !ok {
-				return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
+			if _, ok := runtimes[defaultRuntime]; !ok && !IsPermissibleC8dRuntimeName(defaultRuntime) {
+				return errors.Errorf("specified default runtime '%s' does not exist", defaultRuntime)
 			}
 		}
 	}
@@ -669,3 +666,37 @@ func MaskCredentials(rawURL string) string {
 	parsedURL.User = url.UserPassword("xxxxx", "xxxxx")
 	return parsedURL.String()
 }
+
+// IsPermissibleC8dRuntimeName tests whether name is safe to pass into
+// containerd as a runtime name, and whether the name is well-formed.
+// It does not check if the runtime is installed.
+//
+// A runtime name containing slash characters is interpreted by containerd as
+// the path to a runtime binary. If we allowed this, anyone with Engine API
+// access could get containerd to execute an arbitrary binary as root. Although
+// Engine API access is already equivalent to root on the host, the runtime name
+// has not historically been a vector to run arbitrary code as root so users are
+// not expecting it to become one.
+//
+// This restriction is not configurable. There are viable workarounds for
+// legitimate use cases: administrators and runtime developers can make runtimes
+// available for use with Docker by installing them onto PATH following the
+// [binary naming convention] for containerd Runtime v2.
+//
+// [binary naming convention]: https://github.com/containerd/containerd/blob/main/runtime/v2/README.md#binary-naming
+func IsPermissibleC8dRuntimeName(name string) bool {
+	// containerd uses a rather permissive test to validate runtime names:
+	//
+	//   - Any name for which filepath.IsAbs(name) is true is interpreted as the
+	//     absolute path to a shim binary. We want to block this behaviour.
+	//   - Any name which contains at least one '.' character and no '/' characters
+	//     and does not begin with a '.' character is a valid runtime name. The shim
+	//     binary name is derived from the final two components of the name and
+	//     searched for on the PATH. The name "a.." is technically valid per
+	//     containerd's implementation: it would resolve to a binary named
+	//     "containerd-shim---".
+	//
+	// https://github.com/containerd/containerd/blob/11ded166c15f92450958078cd13c6d87131ec563/runtime/v2/manager.go#L297-L317
+	// https://github.com/containerd/containerd/blob/11ded166c15f92450958078cd13c6d87131ec563/runtime/v2/shim/util.go#L83-L93
+	return !filepath.IsAbs(name) && !strings.ContainsRune(name, '/') && shim.BinaryName(name) != ""
+}
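To make the accepted/rejected split concrete, here is a self-contained sketch of the two path-based rules. It is a simplified stand-in, not the function above; the real check also requires containerd's shim.BinaryName to derive a binary name.

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// permissible is a simplified stand-in for IsPermissibleC8dRuntimeName:
// reject absolute paths and any name containing a path separator.
func permissible(name string) bool {
	return !filepath.IsAbs(name) && !strings.ContainsRune(name, '/')
}

func main() {
	for _, name := range []string{
		"io.containerd.runc.v2",  // accepted: dotted name, shim found via PATH
		"/usr/local/bin/my-shim", // rejected: absolute path to a binary
		"vendor/runtime.v1",      // rejected: contains a '/'
	} {
		fmt.Printf("%-24s permissible=%v\n", name, permissible(name))
	}
}
```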
@@ -139,18 +139,6 @@ func TestUnixValidateConfigurationErrors(t *testing.T) {
 			},
 			expectedErr: `runtime name 'runc' is reserved`,
 		},
-		{
-			doc: `default runtime should be present in runtimes`,
-			config: &Config{
-				Runtimes: map[string]types.Runtime{
-					"foo": {},
-				},
-				CommonConfig: CommonConfig{
-					DefaultRuntime: "bar",
-				},
-			},
-			expectedErr: `specified default runtime 'bar' does not exist`,
-		},
 	}
 	for _, tc := range testCases {
 		tc := tc
@@ -28,11 +28,11 @@ import (
 // GetContainer looks for a container using the provided information, which could be
 // one of the following inputs from the caller:
 //  - A full container ID, which will exact match a container in daemon's list
 //  - A container name, which will only exact match via the GetByName() function
 //  - A partial container ID prefix (e.g. short ID) of any length that is
 //    unique enough to only return a single container object
 // If none of these searches succeed, an error is returned
 func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) {
 	if len(prefixOrName) == 0 {
 		return nil, errors.WithStack(invalidIdentifier(prefixOrName))
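A side note on the short-ID rule described here: a prefix resolves only when it is unambiguous. The following self-contained sketch illustrates that rule; the daemon itself uses an internal index, and the IDs below are fabricated.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// findByPrefix applies the short-ID rule: a prefix resolves only if it
// matches exactly one known container ID.
func findByPrefix(ids []string, prefix string) (string, error) {
	var match string
	for _, id := range ids {
		if strings.HasPrefix(id, prefix) {
			if match != "" {
				return "", errors.New("ambiguous prefix: more than one match")
			}
			match = id
		}
	}
	if match == "" {
		return "", errors.New("no such container")
	}
	return match, nil
}

func main() {
	ids := []string{"4e1f5c9a0b7d", "4e2b7d10c3aa"}
	fmt.Println(findByPrefix(ids, "4e1f")) // unique: resolves
	fmt.Println(findByPrefix(ids, "4e"))   // ambiguous: error
	fmt.Println(findByPrefix(ids, "ff"))   // unknown: error
}
```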
@@ -562,7 +562,6 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr er
 		if err := daemon.connectToNetwork(container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil {
 			return err
 		}
-
 	}
 
 	// the intermediate map is necessary because "connectToNetwork" modifies "container.NetworkSettings.Networks"
@@ -601,7 +600,6 @@ func (daemon *Daemon) allocateNetwork(container *container.Container) (retErr er
 				}
 			}()
 		}
-
 	}
 
 	if _, err := container.WriteHostConfig(); err != nil {
@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 
@@ -8,11 +9,14 @@ import (
 	"github.com/containerd/containerd/content/local"
 	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/metadata"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"go.etcd.io/bbolt"
 )
 
-func (daemon *Daemon) configureLocalContentStore() (content.Store, leases.Manager, error) {
+func (daemon *Daemon) configureLocalContentStore(ns string) (content.Store, leases.Manager, error) {
 	if err := os.MkdirAll(filepath.Join(daemon.root, "content"), 0700); err != nil {
 		return nil, nil, errors.Wrap(err, "error creating dir for content store")
 	}
@@ -26,5 +30,128 @@ func (daemon *Daemon) configureLocalContentStore() (content.Store, leases.Manage
 	}
 	md := metadata.NewDB(db, cs, nil)
 	daemon.mdDB = db
-	return md.ContentStore(), metadata.NewLeaseManager(md), nil
+	return namespacedContentProvider(md.ContentStore(), ns), namespacedLeaseManager(metadata.NewLeaseManager(md), ns), nil
+}
+
+// withDefaultNamespace sets the given namespace on the context if the current
+// context doesn't hold any namespace
+func withDefaultNamespace(ctx context.Context, namespace string) context.Context {
+	if _, ok := namespaces.Namespace(ctx); ok {
+		return ctx
+	}
+	return namespaces.WithNamespace(ctx, namespace)
+}
+
+type namespacedContent struct {
+	ns       string
+	provider content.Store
+}
+
+// Delete removes the content from the store.
+func (cp namespacedContent) Delete(ctx context.Context, dgst digest.Digest) error {
+	return cp.provider.Delete(withDefaultNamespace(ctx, cp.ns), dgst)
+}
+
+// Info will return metadata about content available in the content store.
+//
+// If the content is not present, ErrNotFound will be returned.
+func (cp namespacedContent) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
+	return cp.provider.Info(withDefaultNamespace(ctx, cp.ns), dgst)
+}
+
+// Update updates mutable information related to content.
+// If one or more fieldpaths are provided, only those
+// fields will be updated.
+// Mutable fields:
+//
+//	labels.*
+func (cp namespacedContent) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+	return cp.provider.Update(withDefaultNamespace(ctx, cp.ns), info, fieldpaths...)
+}
+
+// Walk will call fn for each item in the content store which
+// match the provided filters. If no filters are given all
+// items will be walked.
+func (cp namespacedContent) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
+	return cp.provider.Walk(withDefaultNamespace(ctx, cp.ns), fn, filters...)
+}
+
+// Abort completely cancels the ingest operation targeted by ref.
+func (cp namespacedContent) Abort(ctx context.Context, ref string) error {
+	return cp.provider.Abort(withDefaultNamespace(ctx, cp.ns), ref)
+}
+
+// ListStatuses returns the status of any active ingestions whose ref match the
+// provided regular expression. If empty, all active ingestions will be
+// returned.
+func (cp namespacedContent) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
+	return cp.provider.ListStatuses(withDefaultNamespace(ctx, cp.ns), filters...)
+}
+
+// Status returns the status of the provided ref.
+func (cp namespacedContent) Status(ctx context.Context, ref string) (content.Status, error) {
+	return cp.provider.Status(withDefaultNamespace(ctx, cp.ns), ref)
+}
+
+// Some implementations require WithRef to be included in opts.
+func (cp namespacedContent) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+	return cp.provider.Writer(withDefaultNamespace(ctx, cp.ns), opts...)
+}
+
+// ReaderAt only requires desc.Digest to be set.
+// Other fields in the descriptor may be used internally for resolving
+// the location of the actual data.
+func (cp namespacedContent) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
+	return cp.provider.ReaderAt(withDefaultNamespace(ctx, cp.ns), desc)
+}
+
+// namespacedContentProvider sets the namespace if missing before calling the inner provider
+func namespacedContentProvider(provider content.Store, ns string) content.Store {
+	return namespacedContent{
+		ns,
+		provider,
+	}
+}
+
+type namespacedLeases struct {
+	ns      string
+	manager leases.Manager
+}
+
+// AddResource references the resource by the provided lease.
+func (nl namespacedLeases) AddResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error {
+	return nl.manager.AddResource(withDefaultNamespace(ctx, nl.ns), lease, resource)
+}
+
+// Create creates a new lease using the provided lease
+func (nl namespacedLeases) Create(ctx context.Context, opt ...leases.Opt) (leases.Lease, error) {
+	return nl.manager.Create(withDefaultNamespace(ctx, nl.ns), opt...)
+}
+
+// Delete deletes the lease with the provided lease ID
+func (nl namespacedLeases) Delete(ctx context.Context, lease leases.Lease, opt ...leases.DeleteOpt) error {
+	return nl.manager.Delete(withDefaultNamespace(ctx, nl.ns), lease, opt...)
+}
+
+// DeleteResource dereferences the resource by the provided lease.
+func (nl namespacedLeases) DeleteResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error {
+	return nl.manager.DeleteResource(withDefaultNamespace(ctx, nl.ns), lease, resource)
+}
+
+// List lists all active leases
+func (nl namespacedLeases) List(ctx context.Context, filter ...string) ([]leases.Lease, error) {
+	return nl.manager.List(withDefaultNamespace(ctx, nl.ns), filter...)
+}
+
+// ListResources lists all the resources referenced by the lease.
+func (nl namespacedLeases) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) {
+	return nl.manager.ListResources(withDefaultNamespace(ctx, nl.ns), lease)
+}
+
+// namespacedLeaseManager sets the namespace if missing before calling the inner manager
+func namespacedLeaseManager(manager leases.Manager, ns string) leases.Manager {
+	return namespacedLeases{
+		ns,
+		manager,
+	}
 }
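Every wrapper method above funnels through withDefaultNamespace, so its defaulting rule is the load-bearing detail: an explicit namespace on the context is preserved, and the configured one is applied only as a fallback. A standalone sketch against containerd's namespaces package (the "moby" and "buildkit" strings are placeholders):

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/namespaces"
)

// withDefaultNamespace applies namespace only when the context has none.
func withDefaultNamespace(ctx context.Context, namespace string) context.Context {
	if _, ok := namespaces.Namespace(ctx); ok {
		return ctx // caller already chose a namespace; keep it
	}
	return namespaces.WithNamespace(ctx, namespace)
}

func main() {
	// No namespace on the context: the default is applied.
	ns, _ := namespaces.Namespace(withDefaultNamespace(context.Background(), "moby"))
	fmt.Println(ns) // moby

	// An explicit namespace wins over the default.
	ctx := namespaces.WithNamespace(context.Background(), "buildkit")
	ns, _ = namespaces.Namespace(withDefaultNamespace(ctx, "moby"))
	fmt.Println(ns) // buildkit
}
```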
@@ -16,7 +16,6 @@ import (
 	"github.com/docker/docker/errdefs"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/runconfig"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/opencontainers/selinux/go-selinux"
@@ -124,9 +123,6 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr
 			return nil, err
 		}
 		os = img.OperatingSystem()
-		if !system.IsOSSupported(os) {
-			return nil, system.ErrNotSupportedOperatingSystem
-		}
 		imgID = img.ID()
 	} else if isWindows {
 		os = "linux" // 'scratch' case.
@@ -519,6 +519,9 @@ func (daemon *Daemon) restore() error {
 			}
 		}
 
+		if err := daemon.prepareMountPoints(c); err != nil {
+			log.WithError(err).Error("failed to prepare mount points for container")
+		}
 		if err := daemon.containerStart(c, "", "", true); err != nil {
 			log.WithError(err).Error("failed to start container")
 		}
@@ -1070,7 +1073,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
 		imgSvcConfig.Leases = d.containerdCli.LeasesService()
 		imgSvcConfig.ContentStore = d.containerdCli.ContentStore()
 	} else {
-		cs, lm, err := d.configureLocalContentStore()
+		cs, lm, err := d.configureLocalContentStore(config.ContainerdNamespace)
 		if err != nil {
 			return nil, err
 		}
@@ -342,5 +342,4 @@ func TestRootMountCleanup(t *testing.T) {
 		checkMounted(t, cfg.Root, false)
 		assert.Assert(t, d.cleanupMounts())
 	})
-
 }
@@ -558,7 +558,6 @@ func verifyPlatformContainerResources(resources *containertypes.Resources, sysIn
 	if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice {
 		warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.")
 		resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{}
-
 	}
 	if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice {
 		warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.")
@@ -707,8 +706,8 @@ func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.
 		hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
 	}
 
-	if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil {
-		return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime)
+	if _, err := daemon.getRuntime(hostConfig.Runtime); err != nil {
+		return warnings, err
 	}
 
 	parser := volumemounts.NewParser()
@@ -764,7 +763,9 @@ func verifyDaemonSettings(conf *config.Config) error {
 	configureRuntimes(conf)
 	if rtName := conf.GetDefaultRuntimeName(); rtName != "" {
 		if conf.GetRuntime(rtName) == nil {
-			return fmt.Errorf("specified default runtime '%s' does not exist", rtName)
+			if !config.IsPermissibleC8dRuntimeName(rtName) {
+				return fmt.Errorf("specified default runtime '%s' does not exist", rtName)
+			}
 		}
 	}
 	return nil
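Together with the config validation earlier, the default-runtime check is now a two-step decision: explicitly configured runtimes win, then well-formed containerd names pass through. A hedged sketch of that order (the map type and the names are invented for illustration):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// resolveDefaultRuntime mirrors the fallback introduced above: a runtime
// declared in configuration always wins; otherwise a well-formed containerd
// runtime name (dotted, no '/', not an absolute path) is passed through.
func resolveDefaultRuntime(configured map[string]bool, name string) error {
	if configured[name] {
		return nil
	}
	wellFormed := !filepath.IsAbs(name) &&
		!strings.ContainsRune(name, '/') &&
		strings.ContainsRune(name, '.')
	if wellFormed {
		return nil
	}
	return fmt.Errorf("specified default runtime '%s' does not exist", name)
}

func main() {
	cfg := map[string]bool{"custom-runc": true}
	fmt.Println(resolveDefaultRuntime(cfg, "custom-runc"))           // <nil>: configured
	fmt.Println(resolveDefaultRuntime(cfg, "io.containerd.runc.v2")) // <nil>: well-formed c8d name
	fmt.Println(resolveDefaultRuntime(cfg, "bar"))                   // error: neither
}
```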
@@ -1077,17 +1078,17 @@ func setupInitLayer(idMapping idtools.IdentityMapping) func(containerfs.Containe
 }
 
 // Parse the remapped root (user namespace) option, which can be one of:
-//   username            - valid username from /etc/passwd
-//   username:groupname  - valid username; valid groupname from /etc/group
-//   uid                 - 32-bit unsigned int valid Linux UID value
-//   uid:gid             - uid value; 32-bit unsigned int Linux GID value
 //
-// If no groupname is specified, and a username is specified, an attempt
-// will be made to lookup a gid for that username as a groupname
+//   - username - valid username from /etc/passwd
+//   - username:groupname - valid username; valid groupname from /etc/group
+//   - uid - 32-bit unsigned int valid Linux UID value
+//   - uid:gid - uid value; 32-bit unsigned int Linux GID value
 //
-// If names are used, they are verified to exist in passwd/group
+// If no groupname is specified, and a username is specified, an attempt
+// will be made to lookup a gid for that username as a groupname
+//
+// If names are used, they are verified to exist in passwd/group
 func parseRemappedRoot(usergrp string) (string, string, error) {
 
 	var (
 		userID, groupID     int
 		username, groupname string
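The four accepted forms reduce to an optional ':'-separated split, with the group defaulting to the user when omitted. A minimal sketch of just that step; the daemon additionally verifies names and looks up IDs in /etc/passwd and /etc/group:

```go
package main

import (
	"fmt"
	"strings"
)

// splitRemappedRoot handles the forms documented above: "username",
// "username:groupname", "uid", or "uid:gid". The second component
// defaults to the first when omitted.
func splitRemappedRoot(usergrp string) (user, group string) {
	parts := strings.SplitN(usergrp, ":", 2)
	user = parts[0]
	group = user
	if len(parts) == 2 {
		group = parts[1]
	}
	return user, group
}

func main() {
	for _, opt := range []string{"someuser", "someuser:somegroup", "1000", "1000:1000"} {
		u, g := splitRemappedRoot(opt)
		fmt.Printf("%-20s user=%s group=%s\n", opt, u, g)
	}
}
```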
@@ -264,6 +264,35 @@ func (daemon *Daemon) initNetworkController(activeSandboxes map[string]interface
 	}
 
 	if !found {
+		// non-default nat networks should be re-created if missing from HNS
+		if v.Type() == "nat" && v.Name() != "nat" {
+			_, _, v4Conf, v6Conf := v.Info().IpamConfig()
+			netOption := map[string]string{}
+			for k, v := range v.Info().DriverOptions() {
+				if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID {
+					netOption[k] = v
+				}
+			}
+			name := v.Name()
+			id := v.ID()
+
+			err = v.Delete()
+			if err != nil {
+				logrus.Errorf("Error occurred when removing network %v", err)
+			}
+
+			_, err := daemon.netController.NewNetwork("nat", name, id,
+				libnetwork.NetworkOptionGeneric(options.Generic{
+					netlabel.GenericData: netOption,
+				}),
+				libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
+			)
+			if err != nil {
+				logrus.Errorf("Error occurred when creating network %v", err)
+			}
+			continue
+		}
+
 		// global networks should not be deleted by local HNS
 		if v.Info().Scope() != datastore.GlobalScope {
 			err = v.Delete()
@@ -5,7 +5,7 @@ import (
 	"time"
 
 	eventtypes "github.com/docker/docker/api/types/events"
-	"github.com/docker/docker/pkg/pubsub"
+	"github.com/moby/pubsub"
 )
 
 const (
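This swaps the vendored pkg/pubsub for the extracted github.com/moby/pubsub module. Assuming the API carried over unchanged from pkg/pubsub (NewPublisher with a publish timeout and buffer size, plus Subscribe, Publish, and Evict), usage looks roughly like:

```go
package main

import (
	"fmt"
	"time"

	"github.com/moby/pubsub"
)

func main() {
	// NewPublisher takes a per-subscriber publish timeout and a channel
	// buffer size, matching the pkg/pubsub API this import replaces.
	p := pubsub.NewPublisher(100*time.Millisecond, 16)
	defer p.Close()

	sub := p.Subscribe()
	p.Publish("container start")

	select {
	case msg := <-sub:
		fmt.Println("received:", msg)
	case <-time.After(time.Second):
		fmt.Println("no event")
	}
	p.Evict(sub)
}
```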
@@ -23,9 +23,6 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-// Seconds to wait after sending TERM before trying KILL
-const termProcessTimeout = 10 * time.Second
-
 func (daemon *Daemon) registerExecCommand(container *container.Container, config *exec.Config) {
 	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
 	container.ExecCommands.Add(config.ID, config)
@@ -272,7 +269,10 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio
 		CloseStdin: true,
 	}
 	ec.StreamConfig.AttachStreams(&attachConfig)
-	attachErr := ec.StreamConfig.CopyStreams(ctx, &attachConfig)
+	// using context.Background() so that attachErr does not race ctx.Done().
+	copyCtx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	attachErr := ec.StreamConfig.CopyStreams(copyCtx, &attachConfig)
 
 	// Synchronize with libcontainerd event loop
 	ec.Lock()
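Why copy under a fresh context: if CopyStreams ran under the request's ctx, a cancellation could surface on attachErr at the same moment ctx.Done() fires, and the select below it would pick a branch nondeterministically. A self-contained sketch of the pattern (copyStreams here is a stand-in, not the daemon's StreamConfig):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// copyStreams stands in for StreamConfig.CopyStreams: it reports on errCh
// when its context is cancelled or the (simulated) copy finishes.
func copyStreams(ctx context.Context) <-chan error {
	errCh := make(chan error, 1)
	go func() {
		select {
		case <-ctx.Done():
			errCh <- ctx.Err()
		case <-time.After(time.Hour): // simulated long-running attach
			errCh <- nil
		}
	}()
	return errCh
}

func main() {
	reqCtx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	// Copy under an independent context: when reqCtx expires, only the
	// reqCtx.Done() branch can fire, so the caller reliably observes the
	// cancellation instead of racing against the copier's own error.
	copyCtx, cancelCopy := context.WithCancel(context.Background())
	defer cancelCopy()
	attachErr := copyStreams(copyCtx)

	select {
	case <-reqCtx.Done():
		fmt.Println("request cancelled:", reqCtx.Err())
	case err := <-attachErr:
		fmt.Println("attach finished:", err)
	}
}
```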
@@ -292,18 +292,15 @@
 
 	select {
 	case <-ctx.Done():
-		logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID)
-		daemon.containerd.SignalProcess(ctx, c.ID, name, signal.SignalMap["TERM"])
-
-		timeout := time.NewTimer(termProcessTimeout)
-		defer timeout.Stop()
-
-		select {
-		case <-timeout.C:
-			logrus.Infof("Container %v, process %v failed to exit within %v of signal TERM - using the force", c.ID, name, termProcessTimeout)
-			daemon.containerd.SignalProcess(ctx, c.ID, name, signal.SignalMap["KILL"])
-		case <-attachErr:
-			// TERM signal worked
-		}
+		log := logrus.
+			WithField("container", c.ID).
+			WithField("exec", name)
+		log.Debug("Sending KILL signal to container process")
+		sigCtx, cancelFunc := context.WithTimeout(context.Background(), 30*time.Second)
+		defer cancelFunc()
+		err := daemon.containerd.SignalProcess(sigCtx, c.ID, name, signal.SignalMap["KILL"])
+		if err != nil {
+			log.WithError(err).Error("Could not send KILL signal to container process")
+		}
 		return ctx.Err()
 	case err := <-attachErr:
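The KILL signal is now sent under a fresh 30-second timeout context rather than the request's ctx, which is already done by the time this branch runs and would cancel the signal call immediately. A sketch of the difference (signalProcess stands in for the containerd client call):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// signalProcess stands in for the containerd SignalProcess call; it fails
// if its context is already cancelled, which is exactly what would happen
// if the request's ctx were reused here after ctx.Done() has fired.
func signalProcess(ctx context.Context, sig string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	fmt.Println("delivered signal", sig)
	return nil
}

func main() {
	// Simulate a request context that has already been cancelled.
	reqCtx, cancel := context.WithCancel(context.Background())
	cancel()

	// Reusing reqCtx would make the KILL a no-op:
	fmt.Println(signalProcess(reqCtx, "KILL")) // context canceled

	// A fresh timeout context, as in the change above, still delivers it:
	sigCtx, cancelFunc := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancelFunc()
	fmt.Println(signalProcess(sigCtx, "KILL")) // <nil>
}
```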
Some files were not shown because too many files have changed in this diff.