Compare commits
211 commits
SHA1:
aee8b332bf 12c4e03288 e2e670299f f42f65b464 935787c19c bd19301d9e 79c31c12fc 50bd133ad3 e63daec867 817bccb1c6
2a0601e84e 9df9ccc06f a987bc5ad0 20c205fd3a 4be97233cc 7ed7e6caf6 81ad7062f0 02d4ee3f9a 5901652edd 478f6b097d
98b171fd4d d250e13945 061aa95809 d0d85f6438 5d6679345c ef1fa235cd 0451b287dc d27fe2558d 77de535364 9e526bc394
2d347024d1 51e876cd96 3fa0cedce3 4e7d8531ed f66b5f642e 41fde13f64 0db1c6d8bb 33a29c0135 30545de83e fa4ea308f0
d66e0fb7b1 30ecc0ea8a 06767446fe 7048a63686 81fb7f9986 b77bb69f87 7a4abb8c77 81a83f0544 abcd6f8a46 f7be6dcba6
10609544e5 be59afce2d 97951c39fb 2001813571 8e3bcf1974 27f36f42a4 1ae019fca2 c761353e7c 00b2e1072b 10bc347b03
6d675b429e 9f1b47c597 94137f6df5 dd5faa9d4f 012bfd33e5 3ec1946ce1 200a2c3576 cb66214dfd 70c05fe10c e85cef89fa
a72294a668 9ee331235a 6fb71a9764 5d9e13bc84 36d02bf488 bb66c3ca04 fa3a64f2bc f417435e5f acd023d42b 7a075cacf9
aff7177ee7 ed7c26339e 74e3b4fb2e 4cc0416534 f9f9e7ff9a 5fb4eb941d 67e9aa6d4d 61b82be580 0227d95f99 fa9c5c55e1
df96d8d0bd 1652559be4 ab29279200 147b5388dd 60103717bc 45dede440e ba4a2dab16 51133117fb 341a7978a5 10e3bfd0ac
269a0d8feb 876b1d1dcd 0bcd64689b 8d454710cd 6cf694fe70 c12bbf549b 1ae115175c a7f9907f5f 9150d0115e 9af7c8ec0a
3344c502da 6c9fafdda7 f8a8cdaf9e 7a659049b8 0ccf1c2a93 28c1a8bc2b 5b5a58b2cd 282891f70c bbe6f09afc 5b13a38144
990e95dcf0 a140d0d95f 91a8312fb7 cf03e96354 c48b67160d 225e043196 78174d2e74 622e66684a 85f4e6151a 3e358447f5
dd4de8f388 f5ef4e76b3 6c5e5271c1 693fca6199 49487e996a 0358f31dc2 081cffb3fa 9de19554c7 2a80b8a7b2 61ffecfa3b
02cd8dec03 1d7df5ecc0 4e68a265ed e437f890ba 5a0015f72c 5babfee371 fce6e0ca9b d838e68300 fa0d4159c7 06e22dce46
b73ee94289 fd6a419ad5 13ce91825f 4b63c47c1e 4edb71bb83 667bc3f803 1b47bfac02 f2d0d87c46 6ac38cdbeb d7bf237e29
f41b342cbe f413ba6fdb c2ef38f790 d5eebf9e19 f3f5327b48 05a370f52f be7b60ef05 6d05b9b65b c01bbbddeb 32635850ed
2cf1c762f8 71fa3ab079 5295e88ceb 6eef840b8a e2ab4718c8 3de920a0b1 a445aa95e5 cb77e48229 e8801fbe26 613b6a12c1
1b6738369f b8cc2e8c66 fcccfeb811 f8eaa14a18 ac76925ff2 c7a1d928c0 2672baefd7 ff15b49b47 c0573b133f c7466c0b52
dde33d0dfe 39fedb254b f0f5fc974a 7c185a1e40 2b036fb1da 1f24da70d8 358fecb566 f030b25770 e07aed0f77 cdf3611cff
05267e9e8c e5edf62bca e14d121d49 e0acf1cd70 c2847b2eb2 0894f7fe69 d25aa32c21 1e335cfa74 4d287e9267 0240f5675b
13964248f1
1743 changed files with 35660 additions and 131941 deletions
@@ -3,19 +3,11 @@
 "build": {
 "context": "..",
 "dockerfile": "../Dockerfile",
-"target": "devcontainer"
+"target": "dev"
 },
 "workspaceFolder": "/go/src/github.com/docker/docker",
 "workspaceMount": "source=${localWorkspaceFolder},target=/go/src/github.com/docker/docker,type=bind,consistency=cached",
 
 "remoteUser": "root",
-"runArgs": ["--privileged"],
-
-"customizations": {
-"vscode": {
-"extensions": [
-"golang.go"
-]
-}
-}
+"runArgs": ["--privileged"]
 }

4  .github/workflows/.test.yml  vendored
@@ -70,7 +70,6 @@ jobs:
 with:
 name: test-reports-unit-${{ inputs.storage }}
 path: /tmp/reports/*
-retention-days: 1
 
 unit-report:
 runs-on: ubuntu-20.04
@@ -151,7 +150,6 @@ jobs:
 with:
 name: test-reports-docker-py-${{ inputs.storage }}
 path: /tmp/reports/*
-retention-days: 1
 
 integration-flaky:
 runs-on: ubuntu-20.04
@@ -273,7+271,6 @@ jobs:
 with:
 name: test-reports-integration-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
 path: /tmp/reports/*
-retention-days: 1
 
 integration-report:
 runs-on: ubuntu-20.04
@@ -413,7 +410,6 @@ jobs:
 with:
 name: test-reports-integration-cli-${{ inputs.storage }}-${{ env.TESTREPORTS_NAME }}
 path: /tmp/reports/*
-retention-days: 1
 
 integration-cli-report:
 runs-on: ubuntu-20.04

2  .github/workflows/.windows.yml  vendored
@@ -190,7 +190,6 @@ jobs:
 with:
 name: ${{ inputs.os }}-${{ inputs.storage }}-unit-reports
 path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
-retention-days: 1
 
 unit-test-report:
 runs-on: ubuntu-latest
@@ -509,7 +508,6 @@ jobs:
 with:
 name: ${{ inputs.os }}-${{ inputs.storage }}-integration-reports-${{ matrix.runtime }}-${{ env.TESTREPORTS_NAME }}
 path: ${{ env.GOPATH }}\src\github.com\docker\docker\bundles\*
-retention-days: 1
 
 integration-test-report:
 runs-on: ubuntu-latest

11  .github/workflows/buildkit.yml  vendored
@@ -50,9 +50,6 @@ jobs:
 timeout-minutes: 120
 needs:
 - build
-env:
-TEST_IMAGE_BUILD: "0"
-TEST_IMAGE_ID: "buildkit-tests"
 strategy:
 fail-fast: false
 matrix:
@@ -118,14 +115,6 @@ jobs:
 sudo service docker restart
 docker version
 docker info
--
-name: Build test image
-uses: docker/bake-action@v4
-with:
-workdir: ./buildkit
-targets: integration-tests
-set: |
-*.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
 -
 name: Test
 run: |

16  .github/workflows/ci.yml  vendored
@@ -51,6 +51,14 @@ jobs:
 name: Check artifacts
 run: |
 find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
+-
+name: Upload artifacts
+uses: actions/upload-artifact@v4
+with:
+name: ${{ matrix.target }}
+path: ${{ env.DESTDIR }}
+if-no-files-found: error
+retention-days: 7
 
 prepare-cross:
 runs-on: ubuntu-latest
@@ -111,3 +119,11 @@ jobs:
 name: Check artifacts
 run: |
 find ${{ env.DESTDIR }} -type f -exec file -e ascii -- {} +
+-
+name: Upload artifacts
+uses: actions/upload-artifact@v4
+with:
+name: cross-${{ env.PLATFORM_PAIR }}
+path: ${{ env.DESTDIR }}
+if-no-files-found: error
+retention-days: 7

2  .github/workflows/test.yml  vendored
@@ -14,8 +14,6 @@ on:
 
 env:
 GO_VERSION: "1.21.9"
-GIT_PAGER: "cat"
-PAGER: "cat"
 
 jobs:
 validate-dco:

22  .github/workflows/validate-pr.yml  vendored
@@ -11,7 +11,7 @@ jobs:
 - name: Missing `area/` label
 if: contains(join(github.event.pull_request.labels.*.name, ','), 'impact/') && !contains(join(github.event.pull_request.labels.*.name, ','), 'area/')
 run: |
-echo "::error::Every PR with an 'impact/*' label should also have an 'area/*' label"
+echo "Every PR with an \`impact/*\` label should also have an \`area/*\` label"
 exit 1
 - name: OK
 run: exit 0
@@ -32,31 +32,15 @@ jobs:
 desc=$(echo "$block" | awk NF)
 
 if [ -z "$desc" ]; then
-echo "::error::Changelog section is empty. Please provide a description for the changelog."
+echo "Changelog section is empty. Please provide a description for the changelog."
 exit 1
 fi
 
 len=$(echo -n "$desc" | wc -c)
 if [[ $len -le 6 ]]; then
-echo "::error::Description looks too short: $desc"
+echo "Description looks too short: $desc"
 exit 1
 fi
 
 echo "This PR will be included in the release notes with the following note:"
 echo "$desc"
-
-check-pr-branch:
-runs-on: ubuntu-20.04
-env:
-PR_TITLE: ${{ github.event.pull_request.title }}
-steps:
-# Backports or PR that target a release branch directly should mention the target branch in the title, for example:
-# [X.Y backport] Some change that needs backporting to X.Y
-# [X.Y] Change directly targeting the X.Y branch
-- name: Get branch from PR title
-id: title_branch
-run: echo "$PR_TITLE" | sed -n 's/^\[\([0-9]*\.[0-9]*\)[^]]*\].*/branch=\1/p' >> $GITHUB_OUTPUT
-
-- name: Check release branch
-if: github.event.pull_request.base.ref != steps.title_branch.outputs.branch && !(github.event.pull_request.base.ref == 'master' && steps.title_branch.outputs.branch == '')
-run: echo "::error::PR title suggests targetting the ${{ steps.title_branch.outputs.branch }} branch, but is opened against ${{ github.event.pull_request.base.ref }}" && exit 1
@@ -1,7 +1,6 @@
 linters:
 enable:
 - depguard
-- dupword # Checks for duplicate words in the source code.
 - goimports
 - gosec
 - gosimple
@@ -26,11 +25,6 @@ linters:
 - docs
 
 linters-settings:
-dupword:
-ignore:
-- "true" # some tests use this as expected output
-- "false" # some tests use this as expected output
-- "root" # for tests using "ls" output with files owned by "root:root"
 importas:
 # Do not allow unaliased imports of aliased packages.
 no-unaliased: true
@@ -51,12 +45,6 @@ linters-settings:
 deny:
 - pkg: io/ioutil
 desc: The io/ioutil package has been deprecated, see https://go.dev/doc/go1.16#ioutil
-- pkg: "github.com/stretchr/testify/assert"
-desc: Use "gotest.tools/v3/assert" instead
-- pkg: "github.com/stretchr/testify/require"
-desc: Use "gotest.tools/v3/assert" instead
-- pkg: "github.com/stretchr/testify/suite"
-desc: Do not use
 revive:
 rules:
 # FIXME make sure all packages have a description. Currently, there's many packages without.

6  .mailmap
@@ -173,8 +173,6 @@ Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
 Dave Goodchild <buddhamagnet@gmail.com>
 Dave Henderson <dhenderson@gmail.com> <Dave.Henderson@ca.ibm.com>
 Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
-David Dooling <dooling@gmail.com>
-David Dooling <dooling@gmail.com> <david.dooling@docker.com>
 David M. Karr <davidmichaelkarr@gmail.com>
 David Sheets <dsheets@docker.com> <sheets@alum.mit.edu>
 David Sissitka <me@dsissitka.com>
@@ -221,8 +219,6 @@ Felix Hupfeld <felix@quobyte.com> <quofelix@users.noreply.github.com>
 Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
 Feng Yan <fy2462@gmail.com>
 Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
-Filipe Pina <hzlu1ot0@duck.com>
-Filipe Pina <hzlu1ot0@duck.com> <636320+fopina@users.noreply.github.com>
 Francisco Carriedo <fcarriedo@gmail.com>
 Frank Rosquin <frank.rosquin+github@gmail.com> <frank.rosquin@gmail.com>
 Frank Yang <yyb196@gmail.com>
@@ -274,7 +270,6 @@ Hollie Teal <hollie@docker.com> <hollie.teal@docker.com>
 Hollie Teal <hollie@docker.com> <hollietealok@users.noreply.github.com>
 hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
 Hu Keping <hukeping@huawei.com>
-Huajin Tong <fliterdashen@gmail.com>
 Hui Kang <hkang.sunysb@gmail.com>
 Hui Kang <hkang.sunysb@gmail.com> <kangh@us.ibm.com>
 Huu Nguyen <huu@prismskylabs.com> <whoshuu@gmail.com>
@@ -568,7 +563,6 @@ Sebastiaan van Stijn <github@gone.nl> <sebastiaan@ws-key-sebas3.dpi1.dpi>
 Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
 Sebastian Thomschke <sebthom@users.noreply.github.com>
 Seongyeol Lim <seongyeol37@gmail.com>
-Serhii Nakon <serhii.n@thescimus.com>
 Shaun Kaasten <shaunk@gmail.com>
 Shawn Landden <shawn@churchofgit.com> <shawnlandden@gmail.com>
 Shengbo Song <thomassong@tencent.com>

9  AUTHORS
@@ -669,7 +669,6 @@ Erik Hollensbe <github@hollensbe.org>
 Erik Inge Bolsø <knan@redpill-linpro.com>
 Erik Kristensen <erik@erikkristensen.com>
 Erik Sipsma <erik@sipsma.dev>
-Erik Sjölund <erik.sjolund@gmail.com>
 Erik St. Martin <alakriti@gmail.com>
 Erik Weathers <erikdw@gmail.com>
 Erno Hopearuoho <erno.hopearuoho@gmail.com>
@@ -732,7 +731,6 @@ Feroz Salam <feroz.salam@sourcegraph.com>
 Ferran Rodenas <frodenas@gmail.com>
 Filipe Brandenburger <filbranden@google.com>
 Filipe Oliveira <contato@fmoliveira.com.br>
-Filipe Pina <hzlu1ot0@duck.com>
 Flavio Castelli <fcastelli@suse.com>
 Flavio Crisciani <flavio.crisciani@docker.com>
 Florian <FWirtz@users.noreply.github.com>
@@ -877,8 +875,6 @@ Hsing-Yu (David) Chen <davidhsingyuchen@gmail.com>
 hsinko <21551195@zju.edu.cn>
 Hu Keping <hukeping@huawei.com>
 Hu Tao <hutao@cn.fujitsu.com>
-Huajin Tong <fliterdashen@gmail.com>
-huang-jl <1046678590@qq.com>
 HuanHuan Ye <logindaveye@gmail.com>
 Huanzhong Zhang <zhanghuanzhong90@gmail.com>
 Huayi Zhang <irachex@gmail.com>
@@ -973,7 +969,6 @@ Jannick Fahlbusch <git@jf-projects.de>
 Januar Wayong <januar@gmail.com>
 Jared Biel <jared.biel@bolderthinking.com>
 Jared Hocutt <jaredh@netapp.com>
-Jaroslav Jindrak <dzejrou@gmail.com>
 Jaroslaw Zabiello <hipertracker@gmail.com>
 Jasmine Hegman <jasmine@jhegman.com>
 Jason A. Donenfeld <Jason@zx2c4.com>
@@ -1017,7 +1012,6 @@ Jeffrey Bolle <jeffreybolle@gmail.com>
 Jeffrey Morgan <jmorganca@gmail.com>
 Jeffrey van Gogh <jvg@google.com>
 Jenny Gebske <jennifer@gebske.de>
-Jeongseok Kang <piono623@naver.com>
 Jeremy Chambers <jeremy@thehipbot.com>
 Jeremy Grosser <jeremy@synack.me>
 Jeremy Huntwork <jhuntwork@lightcubesolutions.com>
@@ -1035,7 +1029,6 @@ Jezeniel Zapanta <jpzapanta22@gmail.com>
 Jhon Honce <jhonce@redhat.com>
 Ji.Zhilong <zhilongji@gmail.com>
 Jian Liao <jliao@alauda.io>
-Jian Zeng <anonymousknight96@gmail.com>
 Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
 Jiang Jinyang <jjyruby@gmail.com>
 Jianyong Wu <jianyong.wu@arm.com>
@@ -1974,7 +1967,6 @@ Sergey Evstifeev <sergey.evstifeev@gmail.com>
 Sergii Kabashniuk <skabashnyuk@codenvy.com>
 Sergio Lopez <slp@redhat.com>
 Serhat Gülçiçek <serhat25@gmail.com>
-Serhii Nakon <serhii.n@thescimus.com>
 SeungUkLee <lsy931106@gmail.com>
 Sevki Hasirci <s@sevki.org>
 Shane Canon <scanon@lbl.gov>
@@ -2261,7 +2253,6 @@ VladimirAus <v_roudakov@yahoo.com>
 Vladislav Kolesnikov <vkolesnikov@beget.ru>
 Vlastimil Zeman <vlastimil.zeman@diffblue.com>
 Vojtech Vitek (V-Teq) <vvitek@redhat.com>
-voloder <110066198+voloder@users.noreply.github.com>
 Walter Leibbrandt <github@wrl.co.za>
 Walter Stanish <walter@pratyeka.org>
 Wang Chao <chao.wang@ucloud.cn>
@@ -101,7 +101,7 @@ the contributors guide.
 <td>
 <p>
 Register for the Docker Community Slack at
-<a href="https://dockr.ly/comm-slack" target="_blank">https://dockr.ly/comm-slack</a>.
+<a href="https://dockr.ly/slack" target="_blank">https://dockr.ly/slack</a>.
 We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd.
 </p>
 </td>

45  Dockerfile
@@ -1,19 +1,19 @@
-# syntax=docker/dockerfile:1.7
+# syntax=docker/dockerfile:1
 
 ARG GO_VERSION=1.21.9
 ARG BASE_DEBIAN_DISTRO="bookworm"
 ARG GOLANG_IMAGE="golang:${GO_VERSION}-${BASE_DEBIAN_DISTRO}"
-ARG XX_VERSION=1.4.0
+ARG XX_VERSION=1.2.1
 
 ARG VPNKIT_VERSION=0.5.0
 
 ARG DOCKERCLI_REPOSITORY="https://github.com/docker/cli.git"
-ARG DOCKERCLI_VERSION=v26.0.0
+ARG DOCKERCLI_VERSION=v25.0.2
 # cli version used for integration-cli tests
 ARG DOCKERCLI_INTEGRATION_REPOSITORY="https://github.com/docker/cli.git"
 ARG DOCKERCLI_INTEGRATION_VERSION=v17.06.2-ce
-ARG BUILDX_VERSION=0.13.1
-ARG COMPOSE_VERSION=v2.25.0
+ARG BUILDX_VERSION=0.12.1
+ARG COMPOSE_VERSION=v2.24.5
 
 ARG SYSTEMD="false"
 ARG DOCKER_STATIC=1
@@ -24,12 +24,6 @@ ARG DOCKER_STATIC=1
 # specified here should match a current release.
 ARG REGISTRY_VERSION=2.8.3
 
-# delve is currently only supported on linux/amd64 and linux/arm64;
-# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
-ARG DELVE_SUPPORTED=${TARGETPLATFORM#linux/amd64} DELVE_SUPPORTED=${DELVE_SUPPORTED#linux/arm64}
-ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:+"unsupported"}
-ARG DELVE_SUPPORTED=${DELVE_SUPPORTED:-"supported"}
-
 # cross compilation helper
 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
 
@@ -150,7 +144,7 @@ RUN git init . && git remote add origin "https://github.com/go-delve/delve.git"
 ARG DELVE_VERSION=v1.21.1
 RUN git fetch -q --depth 1 origin "${DELVE_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
 
-FROM base AS delve-supported
+FROM base AS delve-build
 WORKDIR /usr/src/delve
 ARG TARGETPLATFORM
 RUN --mount=from=delve-src,src=/usr/src/delve,rw \
@@ -161,8 +155,16 @@ RUN --mount=from=delve-src,src=/usr/src/delve,rw \
 xx-verify /build/dlv
 EOT
 
-FROM binary-dummy AS delve-unsupported
-FROM delve-${DELVE_SUPPORTED} AS delve
+# delve is currently only supported on linux/amd64 and linux/arm64;
+# https://github.com/go-delve/delve/blob/v1.8.1/pkg/proc/native/support_sentinel.go#L1-L6
+FROM binary-dummy AS delve-windows
+FROM binary-dummy AS delve-linux-arm
+FROM binary-dummy AS delve-linux-ppc64le
+FROM binary-dummy AS delve-linux-s390x
+FROM delve-build AS delve-linux-amd64
+FROM delve-build AS delve-linux-arm64
+FROM delve-linux-${TARGETARCH} AS delve-linux
+FROM delve-${TARGETOS} AS delve
 
 FROM base AS tomll
 # GOTOML_VERSION specifies the version of the tomll binary to build and install
@@ -196,7 +198,7 @@ RUN git init . && git remote add origin "https://github.com/containerd/container
 # When updating the binary version you may also need to update the vendor
 # version to pick up bug fixes or new APIs, however, usually the Go packages
 # are built from a commit from the master branch.
-ARG CONTAINERD_VERSION=v1.7.15
+ARG CONTAINERD_VERSION=v1.7.13
 RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
 
 FROM base AS containerd-build
@@ -243,18 +245,12 @@ RUN --mount=type=cache,target=/root/.cache/go-build \
 && /build/gotestsum --version
 
 FROM base AS shfmt
-ARG SHFMT_VERSION=v3.8.0
+ARG SHFMT_VERSION=v3.6.0
 RUN --mount=type=cache,target=/root/.cache/go-build \
 --mount=type=cache,target=/go/pkg/mod \
 GOBIN=/build/ GO111MODULE=on go install "mvdan.cc/sh/v3/cmd/shfmt@${SHFMT_VERSION}" \
 && /build/shfmt --version
 
-FROM base AS gopls
-RUN --mount=type=cache,target=/root/.cache/go-build \
---mount=type=cache,target=/go/pkg/mod \
-GOBIN=/build/ GO111MODULE=on go install "golang.org/x/tools/gopls@latest" \
-&& /build/gopls version
-
 FROM base AS dockercli
 WORKDIR /go/src/github.com/docker/cli
 ARG DOCKERCLI_REPOSITORY
@@ -659,11 +655,6 @@ RUN <<EOT
 docker-proxy --version
 EOT
 
-# devcontainer is a stage used by .devcontainer/devcontainer.json
-FROM dev-base AS devcontainer
-COPY --link . .
-COPY --link --from=gopls /build/ /usr/local/bin/
-
 # usage:
 # > make shell
 # > SYSTEMD=true make shell
@@ -164,7 +164,7 @@ SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPref
 ARG GO_VERSION=1.21.9
 ARG GOTESTSUM_VERSION=v1.8.2
 ARG GOWINRES_VERSION=v0.3.1
-ARG CONTAINERD_VERSION=v1.7.15
+ARG CONTAINERD_VERSION=v1.7.13
 
 # Environment variable notes:
 # - GO_VERSION must be consistent with 'Dockerfile' used by Linux.

5  Makefile
@@ -16,9 +16,6 @@ export VALIDATE_REPO
 export VALIDATE_BRANCH
 export VALIDATE_ORIGIN_BRANCH
 
-export PAGER
-export GIT_PAGER
-
 # env vars passed through directly to Docker's build scripts
 # to allow things like `make KEEPBUNDLE=1 binary` easily
 # `project/PACKAGERS.md` have some limited documentation of some of these
@@ -80,8 +77,6 @@ DOCKER_ENVS := \
 -e DEFAULT_PRODUCT_LICENSE \
 -e PRODUCT \
 -e PACKAGER_NAME \
--e PAGER \
--e GIT_PAGER \
 -e OTEL_EXPORTER_OTLP_ENDPOINT \
 -e OTEL_EXPORTER_OTLP_PROTOCOL \
 -e OTEL_SERVICE_NAME
@@ -2,17 +2,8 @@ package api // import "github.com/docker/docker/api"
 
 // Common constants for daemon and client.
 const (
-// DefaultVersion of the current REST API.
-DefaultVersion = "1.45"
+// DefaultVersion of Current REST API
+DefaultVersion = "1.44"
 
-// MinSupportedAPIVersion is the minimum API version that can be supported
-// by the API server, specified as "major.minor". Note that the daemon
-// may be configured with a different minimum API version, as returned
-// in [github.com/docker/docker/api/types.Version.MinAPIVersion].
-//
-// API requests for API versions lower than the configured version produce
-// an error.
-MinSupportedAPIVersion = "1.24"
-
 // NoBaseImageSpecifier is the symbol used by the FROM
 // command to specify that no base image is to be used.

34  api/server/errorhandler.go  Normal file
@@ -0,0 +1,34 @@
+package server
+
+import (
+"net/http"
+
+"github.com/docker/docker/api/server/httpstatus"
+"github.com/docker/docker/api/server/httputils"
+"github.com/docker/docker/api/types"
+"github.com/docker/docker/api/types/versions"
+"github.com/gorilla/mux"
+"google.golang.org/grpc/status"
+)
+
+// makeErrorHandler makes an HTTP handler that decodes a Docker error and
+// returns it in the response.
+func makeErrorHandler(err error) http.HandlerFunc {
+return func(w http.ResponseWriter, r *http.Request) {
+statusCode := httpstatus.FromError(err)
+vars := mux.Vars(r)
+if apiVersionSupportsJSONErrors(vars["version"]) {
+response := &types.ErrorResponse{
+Message: err.Error(),
+}
+_ = httputils.WriteJSON(w, statusCode, response)
+} else {
+http.Error(w, status.Convert(err).Message(), statusCode)
+}
+}
+}
+
+func apiVersionSupportsJSONErrors(version string) bool {
+const firstAPIVersionWithJSONErrors = "1.23"
+return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
+}
@@ -12,4 +12,5 @@ import (
 // container configuration.
 type ContainerDecoder interface {
 DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error)
+DecodeHostConfig(src io.Reader) (*container.HostConfig, error)
 }
@@ -6,7 +6,6 @@ import (
 "net/http"
 "runtime"
 
-"github.com/docker/docker/api"
 "github.com/docker/docker/api/server/httputils"
 "github.com/docker/docker/api/types/versions"
 )
@@ -14,40 +13,19 @@ import (
 // VersionMiddleware is a middleware that
 // validates the client and server versions.
 type VersionMiddleware struct {
 serverVersion string
-
-// defaultAPIVersion is the default API version provided by the API server,
-// specified as "major.minor". It is usually configured to the latest API
-// version [github.com/docker/docker/api.DefaultVersion].
-//
-// API requests for API versions greater than this version are rejected by
-// the server and produce a [versionUnsupportedError].
-defaultAPIVersion string
-
-// minAPIVersion is the minimum API version provided by the API server,
-// specified as "major.minor".
-//
-// API requests for API versions lower than this version are rejected by
-// the server and produce a [versionUnsupportedError].
-minAPIVersion string
+defaultVersion string
+minVersion string
 }
 
-// NewVersionMiddleware creates a VersionMiddleware with the given versions.
-func NewVersionMiddleware(serverVersion, defaultAPIVersion, minAPIVersion string) (*VersionMiddleware, error) {
-if versions.LessThan(defaultAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(defaultAPIVersion, api.DefaultVersion) {
-return nil, fmt.Errorf("invalid default API version (%s): must be between %s and %s", defaultAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
-}
-if versions.LessThan(minAPIVersion, api.MinSupportedAPIVersion) || versions.GreaterThan(minAPIVersion, api.DefaultVersion) {
-return nil, fmt.Errorf("invalid minimum API version (%s): must be between %s and %s", minAPIVersion, api.MinSupportedAPIVersion, api.DefaultVersion)
-}
-if versions.GreaterThan(minAPIVersion, defaultAPIVersion) {
-return nil, fmt.Errorf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", minAPIVersion, defaultAPIVersion)
-}
-return &VersionMiddleware{
-serverVersion: serverVersion,
-defaultAPIVersion: defaultAPIVersion,
-minAPIVersion: minAPIVersion,
-}, nil
+// NewVersionMiddleware creates a new VersionMiddleware
+// with the default versions.
+func NewVersionMiddleware(s, d, m string) VersionMiddleware {
+return VersionMiddleware{
+serverVersion: s,
+defaultVersion: d,
+minVersion: m,
+}
 }
 
 type versionUnsupportedError struct {
@@ -67,18 +45,18 @@ func (e versionUnsupportedError) InvalidParameter() {}
 func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 w.Header().Set("Server", fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS))
-w.Header().Set("API-Version", v.defaultAPIVersion)
+w.Header().Set("API-Version", v.defaultVersion)
 w.Header().Set("OSType", runtime.GOOS)
 
 apiVersion := vars["version"]
 if apiVersion == "" {
-apiVersion = v.defaultAPIVersion
+apiVersion = v.defaultVersion
 }
-if versions.LessThan(apiVersion, v.minAPIVersion) {
-return versionUnsupportedError{version: apiVersion, minVersion: v.minAPIVersion}
+if versions.LessThan(apiVersion, v.minVersion) {
+return versionUnsupportedError{version: apiVersion, minVersion: v.minVersion}
 }
-if versions.GreaterThan(apiVersion, v.defaultAPIVersion) {
-return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultAPIVersion}
+if versions.GreaterThan(apiVersion, v.defaultVersion) {
+return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultVersion}
 }
 ctx = context.WithValue(ctx, httputils.APIVersionKey{}, apiVersion)
 return handler(ctx, w, r, vars)
@@ -2,82 +2,27 @@ package middleware // import "github.com/docker/docker/api/server/middleware"
 
 import (
 "context"
-"fmt"
 "net/http"
 "net/http/httptest"
 "runtime"
 "testing"
 
-"github.com/docker/docker/api"
 "github.com/docker/docker/api/server/httputils"
 "gotest.tools/v3/assert"
 is "gotest.tools/v3/assert/cmp"
 )
 
-func TestNewVersionMiddlewareValidation(t *testing.T) {
-tests := []struct {
-doc, defaultVersion, minVersion, expectedErr string
-}{
-{
-doc: "defaults",
-defaultVersion: api.DefaultVersion,
-minVersion: api.MinSupportedAPIVersion,
-},
-{
-doc: "invalid default lower than min",
-defaultVersion: api.MinSupportedAPIVersion,
-minVersion: api.DefaultVersion,
-expectedErr: fmt.Sprintf("invalid API version: the minimum API version (%s) is higher than the default version (%s)", api.DefaultVersion, api.MinSupportedAPIVersion),
-},
-{
-doc: "invalid default too low",
-defaultVersion: "0.1",
-minVersion: api.MinSupportedAPIVersion,
-expectedErr: fmt.Sprintf("invalid default API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
-},
-{
-doc: "invalid default too high",
-defaultVersion: "9999.9999",
-minVersion: api.DefaultVersion,
-expectedErr: fmt.Sprintf("invalid default API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
-},
-{
-doc: "invalid minimum too low",
-defaultVersion: api.MinSupportedAPIVersion,
-minVersion: "0.1",
-expectedErr: fmt.Sprintf("invalid minimum API version (0.1): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
-},
-{
-doc: "invalid minimum too high",
-defaultVersion: api.DefaultVersion,
-minVersion: "9999.9999",
-expectedErr: fmt.Sprintf("invalid minimum API version (9999.9999): must be between %s and %s", api.MinSupportedAPIVersion, api.DefaultVersion),
-},
-}
-
-for _, tc := range tests {
-tc := tc
-t.Run(tc.doc, func(t *testing.T) {
-_, err := NewVersionMiddleware("1.2.3", tc.defaultVersion, tc.minVersion)
-if tc.expectedErr == "" {
-assert.Check(t, err)
-} else {
-assert.Check(t, is.Error(err, tc.expectedErr))
-}
-})
-}
-}
-
 func TestVersionMiddlewareVersion(t *testing.T) {
-expectedVersion := "<not set>"
+defaultVersion := "1.10.0"
+minVersion := "1.2.0"
+expectedVersion := defaultVersion
 handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 v := httputils.VersionFromContext(ctx)
 assert.Check(t, is.Equal(expectedVersion, v))
 return nil
 }
 
-m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
-assert.NilError(t, err)
+m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
 h := m.WrapHandler(handler)
 
 req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
@@ -90,19 +35,19 @@ func TestVersionMiddlewareVersion(t *testing.T) {
 errString string
 }{
 {
-expectedVersion: api.DefaultVersion,
+expectedVersion: "1.10.0",
 },
 {
-reqVersion: api.MinSupportedAPIVersion,
-expectedVersion: api.MinSupportedAPIVersion,
+reqVersion: "1.9.0",
+expectedVersion: "1.9.0",
 },
 {
 reqVersion: "0.1",
-errString: fmt.Sprintf("client version 0.1 is too old. Minimum supported API version is %s, please upgrade your client to a newer version", api.MinSupportedAPIVersion),
+errString: "client version 0.1 is too old. Minimum supported API version is 1.2.0, please upgrade your client to a newer version",
 },
 {
 reqVersion: "9999.9999",
-errString: fmt.Sprintf("client version 9999.9999 is too new. Maximum supported API version is %s", api.DefaultVersion),
+errString: "client version 9999.9999 is too new. Maximum supported API version is 1.10.0",
 },
 }
 
@@ -126,8 +71,9 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
 return nil
 }
 
-m, err := NewVersionMiddleware("1.2.3", api.DefaultVersion, api.MinSupportedAPIVersion)
-assert.NilError(t, err)
+defaultVersion := "1.10.0"
+minVersion := "1.2.0"
+m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
 h := m.WrapHandler(handler)
 
 req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
@@ -135,12 +81,12 @@ func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
 ctx := context.Background()
 
 vars := map[string]string{"version": "0.1"}
-err = h(ctx, resp, req, vars)
+err := h(ctx, resp, req, vars)
 assert.Check(t, is.ErrorContains(err, ""))
 
 hdr := resp.Result().Header
-assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/1.2.3"))
+assert.Check(t, is.Contains(hdr.Get("Server"), "Docker/"+defaultVersion))
 assert.Check(t, is.Contains(hdr.Get("Server"), runtime.GOOS))
-assert.Check(t, is.Equal(hdr.Get("API-Version"), api.DefaultVersion))
+assert.Check(t, is.Equal(hdr.Get("API-Version"), defaultVersion))
 assert.Check(t, is.Equal(hdr.Get("OSType"), runtime.GOOS))
 }
@@ -42,7 +42,6 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
 SuppressOutput: httputils.BoolValue(r, "q"),
 NoCache: httputils.BoolValue(r, "nocache"),
 ForceRemove: httputils.BoolValue(r, "forcerm"),
-PullParent: httputils.BoolValue(r, "pull"),
 MemorySwap: httputils.Int64ValueOrZero(r, "memswap"),
 Memory: httputils.Int64ValueOrZero(r, "memory"),
 CPUShares: httputils.Int64ValueOrZero(r, "cpushares"),
@@ -67,14 +66,17 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
 return nil, invalidParam{errors.New("security options are not supported on " + runtime.GOOS)}
 }
 
-if httputils.BoolValue(r, "forcerm") {
+version := httputils.VersionFromContext(ctx)
+if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") {
 options.Remove = true
-} else if r.FormValue("rm") == "" {
+} else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") {
 options.Remove = true
 } else {
 options.Remove = httputils.BoolValue(r, "rm")
 }
-version := httputils.VersionFromContext(ctx)
+if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") {
+options.PullParent = true
+}
 if versions.GreaterThanOrEqualTo(version, "1.32") {
 options.Platform = r.FormValue("platform")
 }
@@ -24,6 +24,7 @@ type execBackend interface {
 // copyBackend includes functions to implement to provide container copy functionality.
 type copyBackend interface {
 ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error)
+ContainerCopy(name string, res string) (io.ReadCloser, error)
 ContainerExport(ctx context.Context, name string, out io.Writer) error
 ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error
 ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error)
@@ -38,7 +39,7 @@ type stateBackend interface {
 ContainerResize(name string, height, width int) error
 ContainerRestart(ctx context.Context, name string, options container.StopOptions) error
 ContainerRm(name string, config *backend.ContainerRmConfig) error
-ContainerStart(ctx context.Context, name string, checkpoint string, checkpointDir string) error
+ContainerStart(ctx context.Context, name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
 ContainerStop(ctx context.Context, name string, options container.StopOptions) error
 ContainerUnpause(name string) error
 ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error)
@@ -56,6 +56,7 @@ func (r *containerRouter) initRoutes() {
 router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
 router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
 router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
+router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8 (API v1.20), errors out since 1.12 (API v1.24)
 router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
 router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
 router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
@ -39,6 +39,13 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO: remove pause arg, and always pause in backend
|
||||||
|
pause := httputils.BoolValue(r, "pause")
|
||||||
|
version := httputils.VersionFromContext(ctx)
|
||||||
|
if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") {
|
||||||
|
pause = true
|
||||||
|
}
|
||||||
|
|
||||||
config, _, _, err := s.decoder.DecodeConfig(r.Body)
|
config, _, _, err := s.decoder.DecodeConfig(r.Body)
|
||||||
if err != nil && !errors.Is(err, io.EOF) { // Do not fail if body is empty.
|
if err != nil && !errors.Is(err, io.EOF) { // Do not fail if body is empty.
|
||||||
return err
|
return err
|
||||||
|
@ -50,7 +57,7 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
|
||||||
}
|
}
|
||||||
|
|
||||||
imgID, err := s.backend.CreateImageFromContainer(ctx, r.Form.Get("container"), &backend.CreateImageConfig{
|
imgID, err := s.backend.CreateImageFromContainer(ctx, r.Form.Get("container"), &backend.CreateImageConfig{
|
||||||
Pause: httputils.BoolValueOrDefault(r, "pause", true), // TODO(dnephin): remove pause arg, and always pause in backend
|
Pause: pause,
|
||||||
Tag: ref,
|
Tag: ref,
|
||||||
Author: r.Form.Get("author"),
|
Author: r.Form.Get("author"),
|
||||||
Comment: r.Form.Get("comment"),
|
Comment: r.Form.Get("comment"),
|
||||||
|
@ -111,11 +118,14 @@ func (s *containerRouter) getContainersStats(ctx context.Context, w http.Respons
|
||||||
oneShot = httputils.BoolValueOrDefault(r, "one-shot", false)
|
oneShot = httputils.BoolValueOrDefault(r, "one-shot", false)
|
||||||
}
|
}
|
||||||
|
|
||||||
return s.backend.ContainerStats(ctx, vars["name"], &backend.ContainerStatsConfig{
|
config := &backend.ContainerStatsConfig{
|
||||||
Stream: stream,
|
Stream: stream,
|
||||||
OneShot: oneShot,
|
OneShot: oneShot,
|
||||||
OutStream: w,
|
OutStream: w,
|
||||||
})
|
Version: httputils.VersionFromContext(ctx),
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.backend.ContainerStats(ctx, vars["name"], config)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
|
||||||
|
@@ -168,6 +178,14 @@ func (s *containerRouter) getContainersExport(ctx context.Context, w http.Respon
 	return s.backend.ContainerExport(ctx, vars["name"], w)
 }
 
+type bodyOnStartError struct{}
+
+func (bodyOnStartError) Error() string {
+	return "starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"
+}
+
+func (bodyOnStartError) InvalidParameter() {}
+
 func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	// If contentLength is -1, we can assumed chunked encoding
 	// or more technically that the length is unknown
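The `bodyOnStartError` type added above follows the daemon's errdefs convention: the error string carries the user-facing message, while the empty `InvalidParameter()` method marks the error so the HTTP layer can map it to a 400 response. A minimal, self-contained sketch of that marker-interface pattern (the `statusFromError` helper below is illustrative only, not the daemon's actual implementation):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

// invalidParameter is the marker interface; any error implementing it is
// treated as a client error (HTTP 400).
type invalidParameter interface{ InvalidParameter() }

type bodyOnStartError struct{}

func (bodyOnStartError) Error() string {
	return "starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"
}
func (bodyOnStartError) InvalidParameter() {}

// statusFromError picks an HTTP status based on the marker interfaces the
// error implements, defaulting to 500.
func statusFromError(err error) int {
	var ip invalidParameter
	if errors.As(err, &ip) {
		return http.StatusBadRequest
	}
	return http.StatusInternalServerError
}

func main() {
	fmt.Println(statusFromError(bodyOnStartError{})) // 400
	fmt.Println(statusFromError(errors.New("boom"))) // 500
}
```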
@@ -175,17 +193,33 @@ func (s *containerRouter) postContainersStart(ctx context.Context, w http.Respon
 	// net/http otherwise seems to swallow any headers related to chunked encoding
 	// including r.TransferEncoding
 	// allow a nil body for backwards compatibility
-	//
+	version := httputils.VersionFromContext(ctx)
+	var hostConfig *container.HostConfig
 	// A non-nil json object is at least 7 characters.
 	if r.ContentLength > 7 || r.ContentLength == -1 {
-		return errdefs.InvalidParameter(errors.New("starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"))
+		if versions.GreaterThanOrEqualTo(version, "1.24") {
+			return bodyOnStartError{}
+		}
+
+		if err := httputils.CheckForJSON(r); err != nil {
+			return err
+		}
+
+		c, err := s.decoder.DecodeHostConfig(r.Body)
+		if err != nil {
+			return err
+		}
+		hostConfig = c
 	}
 
 	if err := httputils.ParseForm(r); err != nil {
 		return err
 	}
 
-	if err := s.backend.ContainerStart(ctx, vars["name"], r.Form.Get("checkpoint"), r.Form.Get("checkpoint-dir")); err != nil {
+	checkpoint := r.Form.Get("checkpoint")
+	checkpointDir := r.Form.Get("checkpoint-dir")
+	if err := s.backend.ContainerStart(ctx, vars["name"], hostConfig, checkpoint, checkpointDir); err != nil {
 		return err
 	}
 
@@ -221,14 +255,25 @@ func (s *containerRouter) postContainersStop(ctx context.Context, w http.Respons
 	return nil
 }
 
-func (s *containerRouter) postContainersKill(_ context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := httputils.ParseForm(r); err != nil {
 		return err
 	}
 
 	name := vars["name"]
 	if err := s.backend.ContainerKill(name, r.Form.Get("signal")); err != nil {
-		return errors.Wrapf(err, "cannot kill container: %s", name)
+		var isStopped bool
+		if errdefs.IsConflict(err) {
+			isStopped = true
+		}
+
+		// Return error that's not caused because the container is stopped.
+		// Return error if the container is not running and the api is >= 1.20
+		// to keep backwards compatibility.
+		version := httputils.VersionFromContext(ctx)
+		if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped {
+			return errors.Wrapf(err, "Cannot kill container: %s", name)
+		}
 	}
 
 	w.WriteHeader(http.StatusNoContent)
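The rewritten kill handler keeps pre-1.20 behaviour: when the container is already stopped, the conflict error is swallowed for old clients but surfaced to newer ones. A rough, self-contained sketch of that gate (the version comparison below is a simplified stand-in for the daemon's `versions` package and assumes plain `major.minor` strings):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// greaterThanOrEqualTo is a simplified stand-in for the daemon's versions
// package; it assumes well-formed "major.minor" strings such as "1.19".
func greaterThanOrEqualTo(v, other string) bool {
	va := strings.Split(v, ".")
	vb := strings.Split(other, ".")
	for i := 0; i < len(va) && i < len(vb); i++ {
		a, _ := strconv.Atoi(va[i])
		b, _ := strconv.Atoi(vb[i])
		if a != b {
			return a > b
		}
	}
	return len(va) >= len(vb)
}

// shouldReportKillError mirrors the gate in the handler: a "container is not
// running" conflict is only reported to clients speaking API 1.20 or newer.
func shouldReportKillError(apiVersion string, containerStopped bool) bool {
	return greaterThanOrEqualTo(apiVersion, "1.20") || !containerStopped
}

func main() {
	fmt.Println(shouldReportKillError("1.19", true)) // false: old client, error is swallowed
	fmt.Println(shouldReportKillError("1.24", true)) // true: newer client sees the error
}
```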
@@ -456,29 +501,18 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
 	if hostConfig == nil {
 		hostConfig = &container.HostConfig{}
 	}
+	if hostConfig.NetworkMode == "" {
+		hostConfig.NetworkMode = "default"
+	}
 	if networkingConfig == nil {
 		networkingConfig = &network.NetworkingConfig{}
 	}
 	if networkingConfig.EndpointsConfig == nil {
 		networkingConfig.EndpointsConfig = make(map[string]*network.EndpointSettings)
 	}
-	// The NetworkMode "default" is used as a way to express a container should
-	// be attached to the OS-dependant default network, in an OS-independent
-	// way. Doing this conversion as soon as possible ensures we have less
-	// NetworkMode to handle down the path (including in the
-	// backward-compatibility layer we have just below).
-	//
-	// Note that this is not the only place where this conversion has to be
-	// done (as there are various other places where containers get created).
-	if hostConfig.NetworkMode == "" || hostConfig.NetworkMode.IsDefault() {
-		hostConfig.NetworkMode = runconfig.DefaultDaemonNetworkMode()
-		if nw, ok := networkingConfig.EndpointsConfig[network.NetworkDefault]; ok {
-			networkingConfig.EndpointsConfig[hostConfig.NetworkMode.NetworkName()] = nw
-			delete(networkingConfig.EndpointsConfig, network.NetworkDefault)
-		}
-	}
 
 	version := httputils.VersionFromContext(ctx)
+	adjustCPUShares := versions.LessThan(version, "1.19")
 
 	// When using API 1.24 and under, the client is responsible for removing the container
 	if versions.LessThan(version, "1.25") {
@@ -604,14 +638,6 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
 		}
 	}
 
-	if versions.LessThan(version, "1.45") {
-		for _, m := range hostConfig.Mounts {
-			if m.VolumeOptions != nil && m.VolumeOptions.Subpath != "" {
-				return errdefs.InvalidParameter(errors.New("VolumeOptions.Subpath needs API v1.45 or newer"))
-			}
-		}
-	}
-
 	var warnings []string
 	if warn, err := handleMACAddressBC(config, hostConfig, networkingConfig, version); err != nil {
 		return err
@@ -632,6 +658,7 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
 		Config:                      config,
 		HostConfig:                  hostConfig,
 		NetworkingConfig:            networkingConfig,
+		AdjustCPUShares:             adjustCPUShares,
 		Platform:                    platform,
 		DefaultReadOnlyNonRecursive: defaultReadOnlyNonRecursive,
 	})
@@ -658,7 +685,7 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
 		}
 		return "", nil
 	}
-	if !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
+	if !hostConfig.NetworkMode.IsDefault() && !hostConfig.NetworkMode.IsBridge() && !hostConfig.NetworkMode.IsUserDefined() {
 		return "", runconfig.ErrConflictContainerNetworkAndMac
 	}
 
@@ -687,7 +714,7 @@ func handleMACAddressBC(config *container.Config, hostConfig *container.HostConf
 		return "", nil
 	}
 	var warning string
-	if hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
+	if hostConfig.NetworkMode.IsDefault() || hostConfig.NetworkMode.IsBridge() || hostConfig.NetworkMode.IsUserDefined() {
 		nwName := hostConfig.NetworkMode.NetworkName()
 		// If there's no endpoint config, create a place to store the configured address.
 		if len(networkingConfig.EndpointsConfig) == 0 {
@@ -11,10 +11,49 @@ import (
 
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/versions"
 	gddohttputil "github.com/golang/gddo/httputil"
 )
 
-// setContainerPathStatHeader encodes the stat to JSON, base64 encode, and place in a header.
+type pathError struct{}
+
+func (pathError) Error() string {
+	return "Path cannot be empty"
+}
+
+func (pathError) InvalidParameter() {}
+
+// postContainersCopy is deprecated in favor of getContainersArchive.
+//
+// Deprecated since 1.8 (API v1.20), errors out since 1.12 (API v1.24)
+func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+	version := httputils.VersionFromContext(ctx)
+	if versions.GreaterThanOrEqualTo(version, "1.24") {
+		w.WriteHeader(http.StatusNotFound)
+		return nil
+	}
+
+	cfg := types.CopyConfig{}
+	if err := httputils.ReadJSON(r, &cfg); err != nil {
+		return err
+	}
+
+	if cfg.Resource == "" {
+		return pathError{}
+	}
+
+	data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource)
+	if err != nil {
+		return err
+	}
+	defer data.Close()
+
+	w.Header().Set("Content-Type", "application/x-tar")
+	_, err = io.Copy(w, data)
+	return err
+}
+
+// // Encode the stat to JSON, base64 encode, and place in a header.
 func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error {
 	statJSON, err := json.Marshal(stat)
 	if err != nil {
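The re-added legacy copy endpoint streams an archive straight to the response rather than buffering it. A small, hedged sketch of that streaming shape outside the daemon (the `openArchive` helper is hypothetical; the Content-Type header and `io.Copy` usage mirror the handler above):

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

// openArchive is a hypothetical stand-in for the backend call that returns a
// tar stream (s.backend.ContainerCopy in the handler above).
func openArchive() (io.ReadCloser, error) {
	return os.Open("resource.tar") // assumes a tar file exists next to the binary
}

func copyHandler(w http.ResponseWriter, r *http.Request) {
	data, err := openArchive()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer data.Close()

	// Stream the archive to the client without holding it in memory.
	w.Header().Set("Content-Type", "application/x-tar")
	if _, err := io.Copy(w, data); err != nil {
		log.Printf("copy: %v", err)
	}
}

func main() {
	http.HandleFunc("/copy", copyHandler)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```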
@@ -71,6 +71,15 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
 		return err
 	}
 
+	version := httputils.VersionFromContext(ctx)
+	if versions.LessThan(version, "1.22") {
+		// API versions before 1.22 did not enforce application/json content-type.
+		// Allow older clients to work by patching the content-type.
+		if r.Header.Get("Content-Type") != "application/json" {
+			r.Header.Set("Content-Type", "application/json")
+		}
+	}
+
 	var (
 		execName        = vars["name"]
 		stdin, inStream io.ReadCloser
@@ -87,8 +96,6 @@ func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.Res
 	}
 
 	if execStartCheck.ConsoleSize != nil {
-		version := httputils.VersionFromContext(ctx)
-
 		// Not supported before 1.42
 		if versions.LessThan(version, "1.42") {
 			execStartCheck.ConsoleSize = nil
@@ -4,7 +4,6 @@ import (
 	"context"
 	"encoding/json"
 	"net/http"
-	"os"
 
 	"github.com/distribution/reference"
 	"github.com/docker/distribution"

@@ -13,7 +12,6 @@ import (
 	"github.com/docker/distribution/manifest/schema2"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/types/registry"
-	distributionpkg "github.com/docker/docker/distribution"
 	"github.com/docker/docker/errdefs"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -26,10 +24,10 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
 
 	w.Header().Set("Content-Type", "application/json")
 
-	imgName := vars["name"]
+	image := vars["name"]
 
 	// TODO why is reference.ParseAnyReference() / reference.ParseNormalizedNamed() not using the reference.ErrTagInvalidFormat (and so on) errors?
-	ref, err := reference.ParseAnyReference(imgName)
+	ref, err := reference.ParseAnyReference(image)
 	if err != nil {
 		return errdefs.InvalidParameter(err)
 	}

@@ -39,7 +37,7 @@ func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.Res
 			// full image ID
 			return errors.Errorf("no manifest found for full image ID")
 		}
-		return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", imgName))
+		return errdefs.InvalidParameter(errors.Errorf("unknown image reference format: %s", image))
 	}
 
 	// For a search it is not an error if no auth was given. Ignore invalid
@@ -155,9 +153,6 @@ func (s *distributionRouter) fetchManifest(ctx context.Context, distrepo distrib
 			}
 		}
 	case *schema1.SignedManifest:
-		if os.Getenv("DOCKER_ENABLE_DEPRECATED_PULL_SCHEMA_1_IMAGE") == "" {
-			return registry.DistributionInspect{}, distributionpkg.DeprecatedSchema1ImageError(namedRef)
-		}
 		platform := ocispec.Platform{
 			Architecture: mnfstObj.Architecture,
 			OS:           "linux",
@@ -21,7 +21,7 @@ type grpcRouter struct {
 // NewRouter initializes a new grpc http router
 func NewRouter(backends ...Backend) router.Router {
 	unary := grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unaryInterceptor(), grpcerrors.UnaryServerInterceptor))
-	stream := grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(otelgrpc.StreamServerInterceptor(), grpcerrors.StreamServerInterceptor)) //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
+	stream := grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(otelgrpc.StreamServerInterceptor(), grpcerrors.StreamServerInterceptor))
 
 	r := &grpcRouter{
 		h2Server: &http2.Server{},

@@ -46,7 +46,7 @@ func (gr *grpcRouter) initRoutes() {
 }
 
 func unaryInterceptor() grpc.UnaryServerInterceptor {
-	withTrace := otelgrpc.UnaryServerInterceptor() //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/moby/issues/47437
+	withTrace := otelgrpc.UnaryServerInterceptor()
 
 	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
 		// This method is used by the clients to send their traces to buildkit so they can be included
@@ -6,7 +6,6 @@ import (
 
 	"github.com/distribution/reference"
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/image"
 	"github.com/docker/docker/api/types/registry"

@@ -25,8 +24,8 @@ type Backend interface {
 type imageBackend interface {
 	ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]image.DeleteResponse, error)
 	ImageHistory(ctx context.Context, imageName string) ([]*image.HistoryResponseItem, error)
-	Images(ctx context.Context, opts image.ListOptions) ([]*image.Summary, error)
-	GetImage(ctx context.Context, refOrID string, options backend.GetImageOpts) (*dockerimage.Image, error)
+	Images(ctx context.Context, opts types.ImageListOptions) ([]*image.Summary, error)
+	GetImage(ctx context.Context, refOrID string, options image.GetImageOpts) (*dockerimage.Image, error)
 	TagImage(ctx context.Context, id dockerimage.ID, newRef reference.Named) error
 	ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error)
 }
@@ -15,9 +15,8 @@ import (
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/server/httputils"
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/filters"
-	imagetypes "github.com/docker/docker/api/types/image"
+	opts "github.com/docker/docker/api/types/image"
 	"github.com/docker/docker/api/types/registry"
 	"github.com/docker/docker/api/types/versions"
 	"github.com/docker/docker/builder/remotecontext"

@@ -73,9 +72,9 @@ func (ir *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrit
 		// Special case: "pull -a" may send an image name with a
 		// trailing :. This is ugly, but let's not break API
 		// compatibility.
-		imgName := strings.TrimSuffix(img, ":")
+		image := strings.TrimSuffix(img, ":")
 
-		ref, err := reference.ParseNormalizedNamed(imgName)
+		ref, err := reference.ParseNormalizedNamed(image)
 		if err != nil {
 			return errdefs.InvalidParameter(err)
 		}
@@ -190,7 +189,7 @@ func (ir *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter
 
 	var ref reference.Named
 
-	// Tag is empty only in case PushOptions.All is true.
+	// Tag is empty only in case ImagePushOptions.All is true.
 	if tag != "" {
 		r, err := httputils.RepoTagReference(img, tag)
 		if err != nil {

@@ -286,7 +285,7 @@ func (ir *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter,
 }
 
 func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{Details: true})
+	img, err := ir.backend.GetImage(ctx, vars["name"], opts.GetImageOpts{Details: true})
 	if err != nil {
 		return err
 	}
@@ -306,10 +305,6 @@ func (ir *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWrite
 			imageInspect.Created = time.Time{}.Format(time.RFC3339Nano)
 		}
 	}
-	if versions.GreaterThanOrEqualTo(version, "1.45") {
-		imageInspect.Container = ""        //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
-		imageInspect.ContainerConfig = nil //nolint:staticcheck // ignore SA1019: field is deprecated, but still set on API < v1.45.
-	}
 	return httputils.WriteJSON(w, http.StatusOK, imageInspect)
 }
 

@@ -364,7 +359,7 @@ func (ir *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, er
 			Data: img.Details.Metadata,
 		},
 		RootFS: rootFSToAPIType(img.RootFS),
-		Metadata: imagetypes.Metadata{
+		Metadata: opts.Metadata{
 			LastTagTime: img.Details.LastUpdated,
 		},
 	}, nil
@@ -406,7 +401,7 @@ func (ir *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter,
 		sharedSize = httputils.BoolValue(r, "shared-size")
 	}
 
-	images, err := ir.backend.Images(ctx, imagetypes.ListOptions{
+	images, err := ir.backend.Images(ctx, types.ImageListOptions{
 		All:        httputils.BoolValue(r, "all"),
 		Filters:    imageFilters,
 		SharedSize: sharedSize,

@@ -463,7 +458,7 @@ func (ir *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter,
 		return errdefs.InvalidParameter(errors.New("refusing to create an ambiguous tag using digest algorithm as name"))
 	}
 
-	img, err := ir.backend.GetImage(ctx, vars["name"], backend.GetImageOpts{})
+	img, err := ir.backend.GetImage(ctx, vars["name"], opts.GetImageOpts{})
 	if err != nil {
 		return errdefs.NotFound(err)
 	}
@@ -213,10 +213,6 @@ func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWr
 		return libnetwork.NetworkNameError(create.Name)
 	}
 
-	// For a Swarm-scoped network, this call to backend.CreateNetwork is used to
-	// validate the configuration. The network will not be created but, if the
-	// configuration is valid, ManagerRedirectError will be returned and handled
-	// below.
 	nw, err := n.backend.CreateNetwork(create)
 	if err != nil {
 		if _, ok := err.(libnetwork.ManagerRedirectError); !ok {
@@ -10,7 +10,6 @@ import (
 	"github.com/docker/docker/api/server/middleware"
 	"github.com/docker/docker/api/server/router"
 	"github.com/docker/docker/api/server/router/debug"
-	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/dockerversion"
 	"github.com/gorilla/mux"
 	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"

@@ -58,13 +57,19 @@ func (s *Server) makeHTTPHandler(handler httputils.APIFunc, operation string) ht
 			if statusCode >= 500 {
 				log.G(ctx).Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err)
 			}
-			_ = httputils.WriteJSON(w, statusCode, &types.ErrorResponse{
-				Message: err.Error(),
-			})
+			makeErrorHandler(err)(w, r)
 		}
 	}), operation).ServeHTTP
 }
 
+type pageNotFoundError struct{}
+
+func (pageNotFoundError) Error() string {
+	return "page not found"
+}
+
+func (pageNotFoundError) NotFound() {}
+
 // CreateMux returns a new mux with all the routers registered.
 func (s *Server) CreateMux(routers ...router.Router) *mux.Router {
 	m := mux.NewRouter()
@@ -86,12 +91,7 @@ func (s *Server) CreateMux(routers ...router.Router) *mux.Router {
 			m.Path("/debug" + r.Path()).Handler(f)
 	}
 
-	notFoundHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		_ = httputils.WriteJSON(w, http.StatusNotFound, &types.ErrorResponse{
-			Message: "page not found",
-		})
-	})
+	notFoundHandler := makeErrorHandler(pageNotFoundError{})
 
 	m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler)
 	m.NotFoundHandler = notFoundHandler
 	m.MethodNotAllowedHandler = notFoundHandler
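Both hunks above funnel error rendering through a single `makeErrorHandler` helper and a `pageNotFoundError` type whose empty `NotFound()` method marks it as a 404. A self-contained approximation of that shape (status selection is simplified here; the real helper derives the code from the daemon's errdefs categorisation):

```go
package main

import (
	"encoding/json"
	"errors"
	"log"
	"net/http"
)

type errorResponse struct {
	Message string `json:"message"`
}

type pageNotFoundError struct{}

func (pageNotFoundError) Error() string { return "page not found" }
func (pageNotFoundError) NotFound()     {}

// makeErrorHandler returns a handler that always renders err as a JSON body,
// picking 404 when the error carries the NotFound marker and 500 otherwise.
func makeErrorHandler(err error) http.HandlerFunc {
	status := http.StatusInternalServerError
	var nf interface{ NotFound() }
	if errors.As(err, &nf) {
		status = http.StatusNotFound
	}
	return func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(status)
		_ = json.NewEncoder(w).Encode(errorResponse{Message: err.Error()})
	}
}

func main() {
	mux := http.NewServeMux()
	// Use the same handler as a catch-all fallback, mirroring how the server
	// wires it into NotFoundHandler and MethodNotAllowedHandler.
	mux.HandleFunc("/", makeErrorHandler(pageNotFoundError{}))
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
}
```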
@@ -15,11 +15,8 @@ import (
 func TestMiddlewares(t *testing.T) {
 	srv := &Server{}
 
-	m, err := middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinSupportedAPIVersion)
-	if err != nil {
-		t.Fatal(err)
-	}
-	srv.UseMiddleware(*m)
+	const apiMinVersion = "1.12"
+	srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, apiMinVersion))
 
 	req, _ := http.NewRequest(http.MethodGet, "/containers/json", nil)
 	resp := httptest.NewRecorder()
@@ -19,10 +19,10 @@ produces:
 consumes:
   - "application/json"
   - "text/plain"
-basePath: "/v1.45"
+basePath: "/v1.44"
 info:
   title: "Docker Engine API"
-  version: "1.45"
+  version: "1.44"
   x-logo:
     url: "https://docs.docker.com/assets/images/logo-docker-main.png"
   description: |
@@ -55,8 +55,8 @@ info:
     the URL is not supported by the daemon, a HTTP `400 Bad Request` error message
     is returned.
 
-    If you omit the version-prefix, the current version of the API (v1.45) is used.
-    For example, calling `/info` is the same as calling `/v1.45/info`. Using the
+    If you omit the version-prefix, the current version of the API (v1.44) is used.
+    For example, calling `/info` is the same as calling `/v1.44/info`. Using the
     API without a version-prefix is deprecated and will be removed in a future release.
 
     Engine releases in the near future should support this version of the API,
@@ -427,10 +427,6 @@ definitions:
           type: "object"
           additionalProperties:
             type: "string"
-        Subpath:
-          description: "Source path inside the volume. Must be relative without any back traversals."
-          type: "string"
-          example: "dir-inside-volume/subdirectory"
       TmpfsOptions:
         description: "Optional configuration for the `tmpfs` type."
         type: "object"
@@ -8774,7 +8770,8 @@ paths:
 
                     <p><br /></p>
 
-                    > **Deprecated**: This field is deprecated and will always be "false".
+                    > **Deprecated**: This field is deprecated and will always
+                    > be "false" in future.
                   type: "boolean"
                   example: false
                 name:
@@ -8817,8 +8814,13 @@ paths:
           description: |
             A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters:
 
+            - `is-automated=(true|false)` (deprecated, see below)
             - `is-official=(true|false)`
             - `stars=<number>` Matches images that has at least 'number' stars.
+
+            The `is-automated` filter is deprecated. The `is_automated` field has
+            been deprecated by Docker Hub's search API. Consequently, searching
+            for `is-automated=true` will yield no results.
           type: "string"
         tags: ["Image"]
   /images/prune:
@@ -18,6 +18,7 @@ type ContainerCreateConfig struct {
 	HostConfig                  *container.HostConfig
 	NetworkingConfig            *network.NetworkingConfig
 	Platform                    *ocispec.Platform
+	AdjustCPUShares             bool
 	DefaultReadOnlyNonRecursive bool
 }
 
@@ -90,6 +91,7 @@ type ContainerStatsConfig struct {
 	Stream    bool
 	OneShot   bool
 	OutStream io.Writer
+	Version   string
 }
 
 // ExecInspect holds information about a running process started
@@ -129,13 +131,6 @@ type CreateImageConfig struct {
 	Changes []string
 }
 
-// GetImageOpts holds parameters to retrieve image information
-// from the backend.
-type GetImageOpts struct {
-	Platform *ocispec.Platform
-	Details  bool
-}
-
 // CommitConfig is the configuration for creating an image as part of a build.
 type CommitConfig struct {
 	Author string
@@ -157,12 +157,42 @@ type ImageBuildResponse struct {
 	OSType string
 }
 
+// ImageCreateOptions holds information to create images.
+type ImageCreateOptions struct {
+	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
+	Platform     string // Platform is the target platform of the image if it needs to be pulled from the registry.
+}
+
 // ImageImportSource holds source information for ImageImport
 type ImageImportSource struct {
 	Source     io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
 	SourceName string    // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
 }
 
+// ImageImportOptions holds information to import images from the client host.
+type ImageImportOptions struct {
+	Tag      string   // Tag is the name to tag this image with. This attribute is deprecated.
+	Message  string   // Message is the message to tag the image with
+	Changes  []string // Changes are the raw changes to apply to this image
+	Platform string   // Platform is the target platform of the image
+}
+
+// ImageListOptions holds parameters to list images with.
+type ImageListOptions struct {
+	// All controls whether all images in the graph are filtered, or just
+	// the heads.
+	All bool
+
+	// Filters is a JSON-encoded set of filter arguments.
+	Filters filters.Args
+
+	// SharedSize indicates whether the shared size of images should be computed.
+	SharedSize bool
+
+	// ContainerCount indicates whether container count should be computed.
+	ContainerCount bool
+}
+
 // ImageLoadResponse returns information to the client about a load process.
 type ImageLoadResponse struct {
 	// Body must be closed to avoid a resource leak
@@ -170,6 +200,14 @@ type ImageLoadResponse struct {
 	JSON bool
 }
 
+// ImagePullOptions holds information to pull images.
+type ImagePullOptions struct {
+	All           bool
+	RegistryAuth  string // RegistryAuth is the base64 encoded credentials for the registry
+	PrivilegeFunc RequestPrivilegeFunc
+	Platform      string
+}
+
 // RequestPrivilegeFunc is a function interface that
 // clients can supply to retry operations after
 // getting an authorization error.
@@ -178,6 +216,15 @@ type ImageLoadResponse struct {
 // if the privilege request fails.
 type RequestPrivilegeFunc func() (string, error)
 
+// ImagePushOptions holds information to push images.
+type ImagePushOptions ImagePullOptions
+
+// ImageRemoveOptions holds parameters to remove images.
+type ImageRemoveOptions struct {
+	Force         bool
+	PruneChildren bool
+}
+
 // ImageSearchOptions holds parameters to search images with.
 type ImageSearchOptions struct {
 	RegistryAuth string
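Note that `type ImagePushOptions ImagePullOptions`, as re-added above, is a defined type rather than an alias: the two share an underlying struct but are distinct types and need an explicit conversion, unlike the `=`-style aliases used later in types_deprecated.go. A tiny illustration of the distinction, using local stand-in types rather than the real API structs:

```go
package main

import "fmt"

// Stand-ins for the API option structs.
type PullOptions struct {
	All          bool
	RegistryAuth string
}

// Defined type: same underlying struct, but a distinct type.
type PushOptions PullOptions

// Alias: just another name for the exact same type.
type PushOptionsAlias = PullOptions

func main() {
	pull := PullOptions{All: true}

	push := PushOptions(pull)         // conversion required for a defined type
	var alias PushOptionsAlias = pull // no conversion needed for an alias

	fmt.Println(push.All, alias.All)
}
```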
@@ -5,8 +5,8 @@ import (
 	"time"
 
 	"github.com/docker/docker/api/types/strslice"
+	dockerspec "github.com/docker/docker/image/spec/specs-go/v1"
 	"github.com/docker/go-connections/nat"
-	dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
 )
 
 // MinimumDuration puts a minimum on user configured duration.
@@ -1,57 +1,9 @@
 package image
 
-import "github.com/docker/docker/api/types/filters"
+import ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 
-// ImportOptions holds information to import images from the client host.
-type ImportOptions struct {
-	Tag      string   // Tag is the name to tag this image with. This attribute is deprecated.
-	Message  string   // Message is the message to tag the image with
-	Changes  []string // Changes are the raw changes to apply to this image
-	Platform string   // Platform is the target platform of the image
-}
-
-// CreateOptions holds information to create images.
-type CreateOptions struct {
-	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
-	Platform     string // Platform is the target platform of the image if it needs to be pulled from the registry.
-}
-
-// PullOptions holds information to pull images.
-type PullOptions struct {
-	All          bool
-	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
-
-	// PrivilegeFunc is a function that clients can supply to retry operations
-	// after getting an authorization error. This function returns the registry
-	// authentication header value in base64 encoded format, or an error if the
-	// privilege request fails.
-	//
-	// Also see [github.com/docker/docker/api/types.RequestPrivilegeFunc].
-	PrivilegeFunc func() (string, error)
-	Platform      string
-}
-
-// PushOptions holds information to push images.
-type PushOptions PullOptions
-
-// ListOptions holds parameters to list images with.
-type ListOptions struct {
-	// All controls whether all images in the graph are filtered, or just
-	// the heads.
-	All bool
-
-	// Filters is a JSON-encoded set of filter arguments.
-	Filters filters.Args
-
-	// SharedSize indicates whether the shared size of images should be computed.
-	SharedSize bool
-
-	// ContainerCount indicates whether container count should be computed.
-	ContainerCount bool
-}
-
-// RemoveOptions holds parameters to remove images.
-type RemoveOptions struct {
-	Force         bool
-	PruneChildren bool
+// GetImageOpts holds parameters to inspect an image.
+type GetImageOpts struct {
+	Platform *ocispec.Platform
+	Details  bool
 }
@@ -96,7 +96,6 @@ type BindOptions struct {
 type VolumeOptions struct {
 	NoCopy       bool              `json:",omitempty"`
 	Labels       map[string]string `json:",omitempty"`
-	Subpath      string            `json:",omitempty"`
 	DriverConfig *Driver           `json:",omitempty"`
 }
 
@@ -94,7 +94,7 @@ type SearchResult struct {
 	Name string `json:"name"`
 	// IsAutomated indicates whether the result is automated.
 	//
-	// Deprecated: the "is_automated" field is deprecated and will always be "false".
+	// Deprecated: the "is_automated" field is deprecated and will always be "false" in the future.
 	IsAutomated bool `json:"is_automated"`
 	// Description is a textual description of the repository
 	Description string `json:"description"`
@@ -82,7 +82,7 @@ type ImageInspect struct {
 	// Depending on how the image was created, this field may be empty.
 	//
 	// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
-	Container string `json:",omitempty"`
+	Container string
 
 	// ContainerConfig is an optional field containing the configuration of the
 	// container that was last committed when creating the image.
@@ -91,7 +91,7 @@ type ImageInspect struct {
 	// and it is not in active use anymore.
 	//
 	// Deprecated: this field is omitted in API v1.45, but kept for backward compatibility.
-	ContainerConfig *container.Config `json:",omitempty"`
+	ContainerConfig *container.Config
 
 	// DockerVersion is the version of Docker that was used to build the image.
 	//
@@ -1,35 +1,138 @@
 package types
 
 import (
+	"github.com/docker/docker/api/types/checkpoint"
+	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/image"
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/api/types/system"
 )
 
-// ImageImportOptions holds information to import images from the client host.
+// CheckpointCreateOptions holds parameters to create a checkpoint from a container.
 //
-// Deprecated: use [image.ImportOptions].
-type ImageImportOptions = image.ImportOptions
+// Deprecated: use [checkpoint.CreateOptions].
+type CheckpointCreateOptions = checkpoint.CreateOptions
 
-// ImageCreateOptions holds information to create images.
+// CheckpointListOptions holds parameters to list checkpoints for a container
 //
-// Deprecated: use [image.CreateOptions].
-type ImageCreateOptions = image.CreateOptions
+// Deprecated: use [checkpoint.ListOptions].
+type CheckpointListOptions = checkpoint.ListOptions
 
-// ImagePullOptions holds information to pull images.
+// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
 //
-// Deprecated: use [image.PullOptions].
-type ImagePullOptions = image.PullOptions
+// Deprecated: use [checkpoint.DeleteOptions].
+type CheckpointDeleteOptions = checkpoint.DeleteOptions
 
-// ImagePushOptions holds information to push images.
+// Checkpoint represents the details of a checkpoint when listing endpoints.
 //
-// Deprecated: use [image.PushOptions].
-type ImagePushOptions = image.PushOptions
+// Deprecated: use [checkpoint.Summary].
+type Checkpoint = checkpoint.Summary
 
-// ImageListOptions holds parameters to list images with.
+// Info contains response of Engine API:
+// GET "/info"
 //
-// Deprecated: use [image.ListOptions].
-type ImageListOptions = image.ListOptions
+// Deprecated: use [system.Info].
+type Info = system.Info
 
-// ImageRemoveOptions holds parameters to remove images.
+// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
+// in the version-string of external tools, such as containerd, or runC.
 //
-// Deprecated: use [image.RemoveOptions].
-type ImageRemoveOptions = image.RemoveOptions
+// Deprecated: use [system.Commit].
+type Commit = system.Commit
+
+// PluginsInfo is a temp struct holding Plugins name
+// registered with docker daemon. It is used by [system.Info] struct
+//
+// Deprecated: use [system.PluginsInfo].
+type PluginsInfo = system.PluginsInfo
+
+// NetworkAddressPool is a temp struct used by [system.Info] struct.
+//
+// Deprecated: use [system.NetworkAddressPool].
+type NetworkAddressPool = system.NetworkAddressPool
+
+// Runtime describes an OCI runtime.
+//
+// Deprecated: use [system.Runtime].
+type Runtime = system.Runtime
+
+// SecurityOpt contains the name and options of a security option.
+//
+// Deprecated: use [system.SecurityOpt].
+type SecurityOpt = system.SecurityOpt
+
+// KeyValue holds a key/value pair.
+//
+// Deprecated: use [system.KeyValue].
+type KeyValue = system.KeyValue
+
+// ImageDeleteResponseItem image delete response item.
+//
+// Deprecated: use [image.DeleteResponse].
+type ImageDeleteResponseItem = image.DeleteResponse
+
+// ImageSummary image summary.
+//
+// Deprecated: use [image.Summary].
+type ImageSummary = image.Summary
+
+// ImageMetadata contains engine-local data about the image.
+//
+// Deprecated: use [image.Metadata].
+type ImageMetadata = image.Metadata
+
+// ServiceCreateResponse contains the information returned to a client
+// on the creation of a new service.
+//
+// Deprecated: use [swarm.ServiceCreateResponse].
+type ServiceCreateResponse = swarm.ServiceCreateResponse
+
+// ServiceUpdateResponse service update response.
+//
+// Deprecated: use [swarm.ServiceUpdateResponse].
+type ServiceUpdateResponse = swarm.ServiceUpdateResponse
+
+// ContainerStartOptions holds parameters to start containers.
+//
+// Deprecated: use [container.StartOptions].
+type ContainerStartOptions = container.StartOptions
+
+// ResizeOptions holds parameters to resize a TTY.
+// It can be used to resize container TTYs and
+// exec process TTYs too.
+//
+// Deprecated: use [container.ResizeOptions].
+type ResizeOptions = container.ResizeOptions
+
+// ContainerAttachOptions holds parameters to attach to a container.
+//
+// Deprecated: use [container.AttachOptions].
+type ContainerAttachOptions = container.AttachOptions
+
+// ContainerCommitOptions holds parameters to commit changes into a container.
+//
+// Deprecated: use [container.CommitOptions].
+type ContainerCommitOptions = container.CommitOptions
+
+// ContainerListOptions holds parameters to list containers with.
+//
+// Deprecated: use [container.ListOptions].
+type ContainerListOptions = container.ListOptions
+
+// ContainerLogsOptions holds parameters to filter logs with.
+//
+// Deprecated: use [container.LogsOptions].
+type ContainerLogsOptions = container.LogsOptions
+
+// ContainerRemoveOptions holds parameters to remove containers.
+//
+// Deprecated: use [container.RemoveOptions].
+type ContainerRemoveOptions = container.RemoveOptions
+
+// DecodeSecurityOptions decodes a security options string slice to a type safe
+// [system.SecurityOpt].
+//
+// Deprecated: use [system.DecodeSecurityOptions].
+func DecodeSecurityOptions(opts []string) ([]system.SecurityOpt, error) {
+	return system.DecodeSecurityOptions(opts)
+}
api/types/versions/README.md (new file, 14 lines)
@@ -0,0 +1,14 @@
+# Legacy API type versions
+
+This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
+
+## Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`.
+
+For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`.
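The README above describes how legacy response shapes are kept in per-version packages. A hedged sketch of how a handler might choose a shape based on the negotiated API version (the structs and the version check below are simplified stand-ins, not the daemon's real types):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Current response shape.
type containerJSON struct {
	ID     string   `json:"Id"`
	Mounts []string `json:"Mounts,omitempty"`
}

// Legacy (pre-1.20 style) shape kept only for old clients, in the spirit of
// the v1p19 package: volumes as flat maps instead of structured mounts.
type containerJSONv1p19 struct {
	ID        string            `json:"Id"`
	Volumes   map[string]string `json:"Volumes"`
	VolumesRW map[string]bool   `json:"VolumesRW"`
}

func render(apiVersion string) ([]byte, error) {
	if apiVersion < "1.20" { // naive string compare; fine for the "1.xx" examples here
		return json.Marshal(containerJSONv1p19{
			ID:        "abc123",
			Volumes:   map[string]string{"/data": "/var/lib/docker/volumes/x"},
			VolumesRW: map[string]bool{"/data": true},
		})
	}
	return json.Marshal(containerJSON{ID: "abc123", Mounts: []string{"/data"}})
}

func main() {
	old, _ := render("1.19")
	cur, _ := render("1.24")
	fmt.Println(string(old))
	fmt.Println(string(cur))
}
```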
api/types/versions/v1p19/types.go (new file, 35 lines)
@@ -0,0 +1,35 @@
+// Package v1p19 provides specific API types for the API version 1, patch 19.
+package v1p19 // import "github.com/docker/docker/api/types/versions/v1p19"
+
+import (
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/versions/v1p20"
+	"github.com/docker/go-connections/nat"
+)
+
+// ContainerJSON is a backcompatibility struct for APIs prior to 1.20.
+// Note this is not used by the Windows daemon.
+type ContainerJSON struct {
+	*types.ContainerJSONBase
+	Volumes         map[string]string
+	VolumesRW       map[string]bool
+	Config          *ContainerConfig
+	NetworkSettings *v1p20.NetworkSettings
+}
+
+// ContainerConfig is a backcompatibility struct for APIs prior to 1.20.
+type ContainerConfig struct {
+	*container.Config
+
+	MacAddress      string
+	NetworkDisabled bool
+	ExposedPorts    map[nat.Port]struct{}
+
+	// backward compatibility, they now live in HostConfig
+	VolumeDriver string
+	Memory       int64
+	MemorySwap   int64
+	CPUShares    int64  `json:"CpuShares"`
+	CPUSet       string `json:"Cpuset"`
+}
api/types/versions/v1p20/types.go (new file, 40 lines)
@@ -0,0 +1,40 @@
+// Package v1p20 provides specific API types for the API version 1, patch 20.
+package v1p20 // import "github.com/docker/docker/api/types/versions/v1p20"
+
+import (
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/go-connections/nat"
+)
+
+// ContainerJSON is a backcompatibility struct for the API 1.20
+type ContainerJSON struct {
+	*types.ContainerJSONBase
+	Mounts          []types.MountPoint
+	Config          *ContainerConfig
+	NetworkSettings *NetworkSettings
+}
+
+// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20
+type ContainerConfig struct {
+	*container.Config
+
+	MacAddress      string
+	NetworkDisabled bool
+	ExposedPorts    map[nat.Port]struct{}
+
+	// backward compatibility, they now live in HostConfig
+	VolumeDriver string
+}
+
+// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21
+type StatsJSON struct {
+	types.Stats
+	Network types.NetworkStats `json:"network,omitempty"`
+}
+
+// NetworkSettings is a backward compatible struct for APIs prior to 1.21
+type NetworkSettings struct {
+	types.NetworkSettingsBase
+	types.DefaultNetworkSettings
+}
@@ -238,13 +238,13 @@ type TopologyRequirement struct {
 	// If requisite is specified, all topologies in preferred list MUST
 	// also be present in the list of requisite topologies.
 	//
-	// If the SP is unable to make the provisioned volume available
+	// If the SP is unable to to make the provisioned volume available
 	// from any of the preferred topologies, the SP MAY choose a topology
 	// from the list of requisite topologies.
 	// If the list of requisite topologies is not specified, then the SP
 	// MAY choose from the list of all possible topologies.
 	// If the list of requisite topologies is specified and the SP is
-	// unable to make the provisioned volume available from any of the
+	// unable to to make the provisioned volume available from any of the
 	// requisite topologies it MUST fail the CreateVolume call.
 	//
 	// Example 1:
@ -254,7 +254,7 @@ type TopologyRequirement struct {
|
||||||
// {"region": "R1", "zone": "Z3"}
|
// {"region": "R1", "zone": "Z3"}
|
||||||
// preferred =
|
// preferred =
|
||||||
// {"region": "R1", "zone": "Z3"}
|
// {"region": "R1", "zone": "Z3"}
|
||||||
// then the SP SHOULD first attempt to make the provisioned volume
|
// then the the SP SHOULD first attempt to make the provisioned volume
|
||||||
// available from "zone" "Z3" in the "region" "R1" and fall back to
|
// available from "zone" "Z3" in the "region" "R1" and fall back to
|
||||||
// "zone" "Z2" in the "region" "R1" if that is not possible.
|
// "zone" "Z2" in the "region" "R1" if that is not possible.
|
||||||
//
|
//
|
||||||
|
@ -268,7 +268,7 @@ type TopologyRequirement struct {
|
||||||
// preferred =
|
// preferred =
|
||||||
// {"region": "R1", "zone": "Z4"},
|
// {"region": "R1", "zone": "Z4"},
|
||||||
// {"region": "R1", "zone": "Z2"}
|
// {"region": "R1", "zone": "Z2"}
|
||||||
// then the SP SHOULD first attempt to make the provisioned volume
|
// then the the SP SHOULD first attempt to make the provisioned volume
|
||||||
// accessible from "zone" "Z4" in the "region" "R1" and fall back to
|
// accessible from "zone" "Z4" in the "region" "R1" and fall back to
|
||||||
// "zone" "Z2" in the "region" "R1" if that is not possible. If that
|
// "zone" "Z2" in the "region" "R1" if that is not possible. If that
|
||||||
// is not possible, the SP may choose between either the "zone"
|
// is not possible, the SP may choose between either the "zone"
|
||||||
|
@ -287,7 +287,7 @@ type TopologyRequirement struct {
|
||||||
// preferred =
|
// preferred =
|
||||||
// {"region": "R1", "zone": "Z5"},
|
// {"region": "R1", "zone": "Z5"},
|
||||||
// {"region": "R1", "zone": "Z3"}
|
// {"region": "R1", "zone": "Z3"}
|
||||||
// then the SP SHOULD first attempt to make the provisioned volume
|
// then the the SP SHOULD first attempt to make the provisioned volume
|
||||||
// accessible from the combination of the two "zones" "Z5" and "Z3" in
|
// accessible from the combination of the two "zones" "Z5" and "Z3" in
|
||||||
// the "region" "R1". If that's not possible, it should fall back to
|
// the "region" "R1". If that's not possible, it should fall back to
|
||||||
// a combination of "Z5" and other possibilities from the list of
|
// a combination of "Z5" and other possibilities from the list of
|
||||||
|
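The requisite/preferred rules spelled out in these comments are easier to see with a concrete value. A minimal sketch matching Example 1 above, assuming the Go CSI bindings from github.com/container-storage-interface/spec/lib/go/csi (illustrative only, not part of this diff):

	req := &csi.TopologyRequirement{
		// The volume must be provisionable from zone Z2 or Z3 of region R1.
		Requisite: []*csi.Topology{
			{Segments: map[string]string{"region": "R1", "zone": "Z2"}},
			{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
		},
		// Every preferred topology also appears in requisite: the SP should
		// try Z3 first and fall back to Z2 if Z3 is not possible.
		Preferred: []*csi.Topology{
			{Segments: map[string]string{"region": "R1", "zone": "Z3"}},
		},
	}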
@@ -9,7 +9,6 @@ import (
 	"fmt"
 	"io"
 	"path"
-	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -35,15 +34,14 @@ import (
 	pkgprogress "github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/reference"
 	"github.com/moby/buildkit/cache"
-	"github.com/moby/buildkit/client"
-	"github.com/moby/buildkit/client/llb/sourceresolver"
+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/source"
-	"github.com/moby/buildkit/source/containerimage"
 	srctypes "github.com/moby/buildkit/source/types"
 	"github.com/moby/buildkit/sourcepolicy"
+	policy "github.com/moby/buildkit/sourcepolicy/pb"
 	spb "github.com/moby/buildkit/sourcepolicy/pb"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/moby/buildkit/util/imageutil"
@@ -82,77 +80,9 @@ func NewSource(opt SourceOpt) (*Source, error) {
 	return &Source{SourceOpt: opt}, nil
 }

-// Schemes returns a list of SourceOp identifier schemes that this source
-// should match.
-func (is *Source) Schemes() []string {
-	return []string{srctypes.DockerImageScheme}
-}
-
-// Identifier constructs an Identifier from the given scheme, ref, and attrs,
-// all of which come from a SourceOp.
-func (is *Source) Identifier(scheme, ref string, attrs map[string]string, platform *pb.Platform) (source.Identifier, error) {
-	return is.registryIdentifier(ref, attrs, platform)
-}
-
-// Copied from github.com/moby/buildkit/source/containerimage/source.go
-func (is *Source) registryIdentifier(ref string, attrs map[string]string, platform *pb.Platform) (source.Identifier, error) {
-	id, err := containerimage.NewImageIdentifier(ref)
-	if err != nil {
-		return nil, err
-	}
-
-	if platform != nil {
-		id.Platform = &ocispec.Platform{
-			OS:           platform.OS,
-			Architecture: platform.Architecture,
-			Variant:      platform.Variant,
-			OSVersion:    platform.OSVersion,
-		}
-		if platform.OSFeatures != nil {
-			id.Platform.OSFeatures = append([]string{}, platform.OSFeatures...)
-		}
-	}
-
-	for k, v := range attrs {
-		switch k {
-		case pb.AttrImageResolveMode:
-			rm, err := resolver.ParseImageResolveMode(v)
-			if err != nil {
-				return nil, err
-			}
-			id.ResolveMode = rm
-		case pb.AttrImageRecordType:
-			rt, err := parseImageRecordType(v)
-			if err != nil {
-				return nil, err
-			}
-			id.RecordType = rt
-		case pb.AttrImageLayerLimit:
-			l, err := strconv.Atoi(v)
-			if err != nil {
-				return nil, errors.Wrapf(err, "invalid layer limit %s", v)
-			}
-			if l <= 0 {
-				return nil, errors.Errorf("invalid layer limit %s", v)
-			}
-			id.LayerLimit = &l
-		}
-	}
-
-	return id, nil
-}
-
-func parseImageRecordType(v string) (client.UsageRecordType, error) {
-	switch client.UsageRecordType(v) {
-	case "", client.UsageRecordTypeRegular:
-		return client.UsageRecordTypeRegular, nil
-	case client.UsageRecordTypeInternal:
-		return client.UsageRecordTypeInternal, nil
-	case client.UsageRecordTypeFrontend:
-		return client.UsageRecordTypeFrontend, nil
-	default:
-		return "", errors.Errorf("invalid record type %s", v)
-	}
-}
+// ID returns image scheme identifier
+func (is *Source) ID() string {
+	return srctypes.DockerImageScheme
+}

 func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
@@ -177,7 +107,7 @@ type resolveRemoteResult struct {
 	dt   []byte
 }

-func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
+func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
 	p := platforms.DefaultSpec()
 	if platform != nil {
 		p = *platform
@@ -186,36 +116,34 @@ func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocisp
 	key := "getconfig::" + ref + "::" + platforms.Format(p)
 	res, err := is.g.Do(ctx, key, func(ctx context.Context) (*resolveRemoteResult, error) {
 		res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
-		dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform)
+		ref, dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform, []*policy.Policy{})
 		if err != nil {
 			return nil, err
 		}
 		return &resolveRemoteResult{ref: ref, dgst: dgst, dt: dt}, nil
 	})
 	if err != nil {
-		return "", nil, err
+		return ref, "", nil, err
 	}
-	return res.dgst, res.dt, nil
+	return res.ref, res.dgst, res.dt, nil
 }

 // ResolveImageConfig returns image config for an image
-func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
-	if opt.ImageOpt == nil {
-		return "", nil, fmt.Errorf("can only resolve an image: %v, opt: %v", ref, opt)
-	}
+func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
 	ref, err := applySourcePolicies(ctx, ref, opt.SourcePolicies)
 	if err != nil {
-		return "", nil, err
+		return "", "", nil, err
 	}
-	resolveMode, err := resolver.ParseImageResolveMode(opt.ImageOpt.ResolveMode)
+	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
 	if err != nil {
-		return "", nil, err
+		return ref, "", nil, err
 	}
 	switch resolveMode {
-	case resolver.ResolveModeForcePull:
-		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
+	case source.ResolveModeForcePull:
+		ref, dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g)
 		// TODO: pull should fallback to local in case of failure to allow offline behavior
 		// the fallback doesn't work currently
+		return ref, dgst, dt, err
 		/*
 			if err == nil {
 				return dgst, dt, err
@@ -225,10 +153,10 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt source
 			return "", dt, err
 		*/

-	case resolver.ResolveModeDefault:
+	case source.ResolveModeDefault:
 		// default == prefer local, but in the future could be smarter
 		fallthrough
-	case resolver.ResolveModePreferLocal:
+	case source.ResolveModePreferLocal:
 		img, err := is.resolveLocal(ref)
 		if err == nil {
 			if opt.Platform != nil && !platformMatches(img, opt.Platform) {
@@ -237,19 +165,19 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt source
 					path.Join(img.OS, img.Architecture, img.Variant),
 				)
 			} else {
-				return "", img.RawJSON(), err
+				return ref, "", img.RawJSON(), err
 			}
 		}
 		// fallback to remote
 		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
 	}
 	// should never happen
-	return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ImageOpt.ResolveMode)
+	return ref, "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
 }

 // Resolve returns access to pulling for an identifier
 func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
-	imageIdentifier, ok := id.(*containerimage.ImageIdentifier)
+	imageIdentifier, ok := id.(*source.ImageIdentifier)
 	if !ok {
 		return nil, errors.Errorf("invalid image identifier %v", id)
 	}
@@ -273,7 +201,7 @@ type puller struct {
 	is               *Source
 	resolveLocalOnce sync.Once
 	g                flightcontrol.Group[struct{}]
-	src              *containerimage.ImageIdentifier
+	src              *source.ImageIdentifier
 	desc             ocispec.Descriptor
 	ref              string
 	config           []byte
@@ -325,7 +253,7 @@ func (p *puller) resolveLocal() {
 		}
 	}

-	if p.src.ResolveMode == resolver.ResolveModeDefault || p.src.ResolveMode == resolver.ResolveModePreferLocal {
+	if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
 		ref := p.src.Reference.String()
 		img, err := p.is.resolveLocal(ref)
 		if err == nil {
@@ -374,17 +302,12 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error {
 		if err != nil {
 			return struct{}{}, err
 		}
-		_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), sourceresolver.Opt{
-			Platform: &p.platform,
-			ImageOpt: &sourceresolver.ResolveImageOpt{
-				ResolveMode: p.src.ResolveMode.String(),
-			},
-		}, p.sm, g)
+		newRef, _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: p.src.ResolveMode.String()}, p.sm, g)
 		if err != nil {
 			return struct{}{}, err
 		}

-		p.ref = ref.String()
+		p.ref = newRef
 		p.config = dt
 	}
 	return struct{}{}, nil
@@ -943,8 +866,12 @@ func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (s
 	if err != nil {
 		return "", errors.WithStack(err)
 	}
-	op := &pb.SourceOp{
-		Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
+	op := &pb.Op{
+		Op: &pb.Op_Source{
+			Source: &pb.SourceOp{
+				Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
+			},
+		},
 	}

 	mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op)
@@ -957,9 +884,9 @@ func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (s
 		t  string
 		ok bool
 	)
-	t, newRef, ok := strings.Cut(op.GetIdentifier(), "://")
+	t, newRef, ok := strings.Cut(op.GetSource().GetIdentifier(), "://")
 	if !ok {
-		return "", errors.Errorf("could not parse ref: %s", op.GetIdentifier())
+		return "", errors.Errorf("could not parse ref: %s", op.GetSource().GetIdentifier())
 	}
 	if ok && t != srctypes.DockerImageScheme {
 		return "", &imageutil.ResolveToNonImageError{Ref: str, Updated: newRef}
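applySourcePolicies wraps the image reference in a SourceOp and runs it through BuildKit's source-policy engine, which may rewrite or reject the reference before the build uses it. As a rough sketch of what such a policy can look like — field and constant names follow the BuildKit sourcepolicy protobuf as the author understands it, so treat this as illustrative rather than authoritative:

	// A policy that rewrites a floating tag to a pinned one before resolution.
	pinAlpine := &spb.Policy{
		Rules: []*spb.Rule{
			{
				Action:   spb.PolicyAction_CONVERT,
				Selector: &spb.Selector{Identifier: "docker-image://docker.io/library/alpine:latest"},
				Updates:  &spb.Update{Identifier: "docker-image://docker.io/library/alpine:3.19"},
			},
		},
	}

A slice of such policies is what arrives here as spls; the Evaluate call above applies the first matching rule to the op's identifier.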
@@ -389,10 +389,9 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
 	}

 	req := &controlapi.SolveRequest{
 		Ref: id,
-		Exporters: []*controlapi.Exporter{
-			&controlapi.Exporter{Type: exporterName, Attrs: exporterAttrs},
-		},
+		Exporter:      exporterName,
+		ExporterAttrs: exporterAttrs,
 		Frontend:      "dockerfile.v0",
 		FrontendAttrs: frontendAttrs,
 		Session:       opt.Options.SessionID,
@@ -67,11 +67,11 @@ func newController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control
 }

 func getTraceExporter(ctx context.Context) trace.SpanExporter {
-	span, _, err := detect.Exporter()
+	exp, err := detect.Exporter()
 	if err != nil {
 		log.G(ctx).WithError(err).Error("Failed to detect trace exporter for buildkit controller")
 	}
-	return span
+	return exp
 }

 func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
@@ -105,8 +105,7 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
 	wo, err := containerd.NewWorkerOpt(opt.Root, opt.ContainerdAddress, opt.Snapshotter, opt.ContainerdNamespace,
 		opt.Rootless, map[string]string{
 			label.Snapshotter: opt.Snapshotter,
-		}, dns, nc, opt.ApparmorProfile, false, nil, "", nil, ctd.WithTimeout(60*time.Second),
-	)
+		}, dns, nc, opt.ApparmorProfile, false, nil, "", ctd.WithTimeout(60*time.Second))
 	if err != nil {
 		return nil, err
 	}
@@ -303,11 +302,9 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
 	}

 	exp, err := mobyexporter.New(mobyexporter.Opt{
 		ImageStore:   dist.ImageStore,
-		ContentStore: store,
-		Differ:       differ,
-		ImageTagger:  opt.ImageTagger,
-		LeaseManager: lm,
+		Differ:      differ,
+		ImageTagger: opt.ImageTagger,
 	})
 	if err != nil {
 		return nil, err
@@ -16,7 +16,6 @@ import (
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor/oci"
 	"github.com/moby/buildkit/executor/resources"
-	resourcestypes "github.com/moby/buildkit/executor/resources/types"
 	"github.com/moby/buildkit/executor/runcexecutor"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/solver/pb"
@@ -57,16 +56,9 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi
 		return nil, err
 	}

-	runcCmds := []string{"runc"}
-
-	// TODO: FIXME: testing env var, replace with something better or remove in a major version or two
-	if runcOverride := os.Getenv("DOCKER_BUILDKIT_RUNC_COMMAND"); runcOverride != "" {
-		runcCmds = []string{runcOverride}
-	}
-
 	return runcexecutor.New(runcexecutor.Opt{
 		Root:                filepath.Join(root, "executor"),
-		CommandCandidates:   runcCmds,
+		CommandCandidates:   []string{"runc"},
 		DefaultCgroupParent: cgroupParent,
 		Rootless:            rootless,
 		NoPivot:             os.Getenv("DOCKER_RAMDISK") != "",
@@ -136,8 +128,8 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n *libnetwork.Network)
 }

 // TODO(neersighted): Unstub Sample(), and collect data from the libnetwork Endpoint.
-func (iface *lnInterface) Sample() (*resourcestypes.NetworkSample, error) {
-	return &resourcestypes.NetworkSample{}, nil
+func (iface *lnInterface) Sample() (*network.Sample, error) {
+	return &network.Sample{}, nil
 }

 func (iface *lnInterface) Set(s *specs.Spec) error {
@@ -1,27 +1,24 @@
 package mobyexporter

 import (
-	"bytes"
 	"context"
+	"encoding/json"
 	"fmt"
 	"strings"

-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/leases"
-	"github.com/containerd/log"
 	distref "github.com/distribution/reference"
 	"github.com/docker/docker/image"
-	"github.com/docker/docker/internal/compatcontext"
 	"github.com/docker/docker/layer"
 	"github.com/moby/buildkit/exporter"
-	"github.com/moby/buildkit/exporter/containerimage"
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
-	"github.com/moby/buildkit/util/leaseutil"
 	"github.com/opencontainers/go-digest"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )

+const (
+	keyImageName = "name"
+)
+
 // Differ can make a moby layer from a snapshot
 type Differ interface {
 	EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
@@ -33,11 +30,9 @@ type ImageTagger interface {

 // Opt defines a struct for creating new exporter
 type Opt struct {
 	ImageStore   image.Store
 	Differ       Differ
 	ImageTagger  ImageTagger
-	ContentStore content.Store
-	LeaseManager leases.Manager
 }

 type imageExporter struct {
@@ -50,14 +45,13 @@ func New(opt Opt) (exporter.Exporter, error) {
 	return im, nil
 }

-func (e *imageExporter) Resolve(ctx context.Context, id int, opt map[string]string) (exporter.ExporterInstance, error) {
+func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
 	i := &imageExporterInstance{
 		imageExporter: e,
-		id:            id,
 	}
 	for k, v := range opt {
-		switch exptypes.ImageExporterOptKey(k) {
-		case exptypes.OptKeyName:
+		switch k {
+		case keyImageName:
 			for _, v := range strings.Split(v, ",") {
 				ref, err := distref.ParseNormalizedNamed(v)
 				if err != nil {
@@ -77,15 +71,10 @@ func (e *imageExporter) Resolve(ctx context.Context, id int, opt map[string]stri

 type imageExporterInstance struct {
 	*imageExporter
-	id          int
 	targetNames []distref.Named
 	meta        map[string][]byte
 }

-func (e *imageExporterInstance) ID() int {
-	return e.id
-}
-
 func (e *imageExporterInstance) Name() string {
 	return "exporting to image"
 }
@@ -94,7 +83,7 @@ func (e *imageExporterInstance) Config() *exporter.Config {
 	return exporter.NewConfig()
 }

-func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
+func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
 	if len(inp.Refs) > 1 {
 		return nil, nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
 	}
@@ -114,14 +103,18 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
 	case 0:
 		config = inp.Metadata[exptypes.ExporterImageConfigKey]
 	case 1:
-		ps, err := exptypes.ParsePlatforms(inp.Metadata)
-		if err != nil {
-			return nil, nil, fmt.Errorf("cannot export image, failed to parse platforms: %w", err)
+		platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey]
+		if !ok {
+			return nil, nil, fmt.Errorf("cannot export image, missing platforms mapping")
 		}
-		if len(ps.Platforms) != len(inp.Refs) {
-			return nil, nil, errors.Errorf("number of platforms does not match references %d %d", len(ps.Platforms), len(inp.Refs))
+		var p exptypes.Platforms
+		if err := json.Unmarshal(platformsBytes, &p); err != nil {
+			return nil, nil, errors.Wrapf(err, "failed to parse platforms passed to exporter")
 		}
-		config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, ps.Platforms[0].ID)]
+		if len(p.Platforms) != len(inp.Refs) {
+			return nil, nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs))
+		}
+		config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.Platforms[0].ID)]
 	}

 	var diffs []digest.Digest
@@ -164,21 +157,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source

 	diffs, history = normalizeLayersAndHistory(diffs, history, ref)

-	var inlineCacheEntry *exptypes.InlineCacheEntry
-	if inlineCache != nil {
-		inlineCacheResult, err := inlineCache(ctx)
-		if err != nil {
-			return nil, nil, err
-		}
-		if inlineCacheResult != nil {
-			if ref != nil {
-				inlineCacheEntry, _ = inlineCacheResult.FindRef(ref.ID())
-			} else {
-				inlineCacheEntry = inlineCacheResult.Ref
-			}
-		}
-	}
-	config, err = patchImageConfig(config, diffs, history, inlineCacheEntry)
+	config, err = patchImageConfig(config, diffs, history, inp.Metadata[exptypes.ExporterInlineCache])
 	if err != nil {
 		return nil, nil, err
 	}
@@ -192,10 +171,8 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
 	}
 	_ = configDone(nil)

-	var names []string
-	for _, targetName := range e.targetNames {
-		names = append(names, targetName.String())
-		if e.opt.ImageTagger != nil {
+	if e.opt.ImageTagger != nil {
+		for _, targetName := range e.targetNames {
 			tagDone := oneOffProgress(ctx, "naming to "+targetName.String())
 			if err := e.opt.ImageTagger.TagImage(ctx, image.ID(digest.Digest(id)), targetName); err != nil {
 				return nil, nil, tagDone(err)
@@ -204,49 +181,8 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
 		}
 	}

-	resp := map[string]string{
+	return map[string]string{
 		exptypes.ExporterImageConfigDigestKey: configDigest.String(),
 		exptypes.ExporterImageDigestKey:       id.String(),
-	}
-	if len(names) > 0 {
-		resp["image.name"] = strings.Join(names, ",")
-	}
-
-	descRef, err := e.newTempReference(ctx, config)
-	if err != nil {
-		return nil, nil, fmt.Errorf("failed to create a temporary descriptor reference: %w", err)
-	}
-
-	return resp, descRef, nil
-}
-
-func (e *imageExporterInstance) newTempReference(ctx context.Context, config []byte) (exporter.DescriptorReference, error) {
-	lm := e.opt.LeaseManager
-
-	dgst := digest.FromBytes(config)
-	leaseCtx, done, err := leaseutil.WithLease(ctx, lm, leaseutil.MakeTemporary)
-	if err != nil {
-		return nil, err
-	}
-
-	unlease := func(ctx context.Context) error {
-		err := done(compatcontext.WithoutCancel(ctx))
-		if err != nil {
-			log.G(ctx).WithError(err).Error("failed to delete descriptor reference lease")
-		}
-		return err
-	}
-
-	desc := ocispec.Descriptor{
-		Digest:    dgst,
-		MediaType: "application/vnd.docker.container.image.v1+json",
-		Size:      int64(len(config)),
-	}
-
-	if err := content.WriteBlob(leaseCtx, e.opt.ContentStore, desc.Digest.String(), bytes.NewReader(config), desc); err != nil {
-		unlease(leaseCtx)
-		return nil, fmt.Errorf("failed to save temporary image config: %w", err)
-	}
-
-	return containerimage.NewDescriptorReference(desc, unlease), nil
+	}, nil, nil
 }
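The Export method above reads its per-platform image config out of inp.Metadata, keyed by exptypes.ExporterPlatformsKey and ExporterImageConfigKey. A hedged sketch of what a caller-side payload could look like (the platform ID and config bytes below are placeholders, not values from this changeset):

	// Illustrative only: building the metadata shape that Export parses.
	p := exptypes.Platforms{
		Platforms: []exptypes.Platform{
			{ID: "linux/amd64", Platform: ocispec.Platform{OS: "linux", Architecture: "amd64"}},
		},
	}
	platformsJSON, err := json.Marshal(p)
	if err != nil {
		panic(err)
	}
	meta := map[string][]byte{
		exptypes.ExporterPlatformsKey: platformsJSON,
		// one image config blob per platform, keyed "<config key>/<platform ID>"
		exptypes.ExporterImageConfigKey + "/linux/amd64": configJSON, // configJSON: placeholder blob
	}
	_ = meta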
@@ -8,7 +8,6 @@ import (
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/log"
 	"github.com/moby/buildkit/cache"
-	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/util/system"
 	"github.com/opencontainers/go-digest"
@@ -39,7 +38,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) {
 	return config.History, nil
 }

-func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History, cache *exptypes.InlineCacheEntry) ([]byte, error) {
+func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History, cache []byte) ([]byte, error) {
 	m := map[string]json.RawMessage{}
 	if err := json.Unmarshal(dt, &m); err != nil {
 		return nil, errors.Wrap(err, "failed to parse image config for patch")
@@ -76,7 +75,7 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History,
 	}

 	if cache != nil {
-		dt, err := json.Marshal(cache.Data)
+		dt, err := json.Marshal(cache)
 		if err != nil {
 			return nil, err
 		}
@@ -19,7 +19,7 @@ func NewExporterWrapper(exp exporter.Exporter) (exporter.Exporter, error) {
 }

 // Resolve applies moby specific attributes to the request.
-func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
+func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
 	if exporterAttrs == nil {
 		exporterAttrs = make(map[string]string)
 	}
@@ -33,5 +33,5 @@ func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporter
 		exporterAttrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling"
 	}

-	return e.exp.Resolve(ctx, id, exporterAttrs)
+	return e.exp.Resolve(ctx, exporterAttrs)
 }
@@ -12,7 +12,7 @@ import (
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/containerd/log"
-	imageadapter "github.com/docker/docker/builder/builder-next/adapters/containerimage"
+	"github.com/docker/docker/builder/builder-next/adapters/containerimage"
 	mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
 	distmetadata "github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
@@ -23,7 +23,7 @@ import (
 	"github.com/moby/buildkit/cache"
 	cacheconfig "github.com/moby/buildkit/cache/config"
 	"github.com/moby/buildkit/client"
-	"github.com/moby/buildkit/client/llb/sourceresolver"
+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/exporter"
 	localexporter "github.com/moby/buildkit/exporter/local"
@@ -37,7 +37,6 @@ import (
 	"github.com/moby/buildkit/solver/llbsolver/ops"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/source"
-	"github.com/moby/buildkit/source/containerimage"
 	"github.com/moby/buildkit/source/git"
 	"github.com/moby/buildkit/source/http"
 	"github.com/moby/buildkit/source/local"
@@ -76,7 +75,7 @@ type Opt struct {
 	ContentStore      *containerdsnapshot.Store
 	CacheManager      cache.Manager
 	LeaseManager      *leaseutil.Manager
-	ImageSource       *imageadapter.Source
+	ImageSource       *containerimage.Source
 	DownloadManager   *xfer.LayerDownloadManager
 	V2MetadataService distmetadata.V2MetadataService
 	Transport         nethttp.RoundTripper
@@ -213,49 +212,6 @@ func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.Imm
 	return w.CacheManager().Get(ctx, id, nil, opts...)
 }

-func (w *Worker) ResolveSourceMetadata(ctx context.Context, op *pb.SourceOp, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (*sourceresolver.MetaResponse, error) {
-	if opt.SourcePolicies != nil {
-		return nil, errors.New("source policies can not be set for worker")
-	}
-
-	var platform *pb.Platform
-	if p := opt.Platform; p != nil {
-		platform = &pb.Platform{
-			Architecture: p.Architecture,
-			OS:           p.OS,
-			Variant:      p.Variant,
-			OSVersion:    p.OSVersion,
-		}
-	}
-
-	id, err := w.SourceManager.Identifier(&pb.Op_Source{Source: op}, platform)
-	if err != nil {
-		return nil, err
-	}
-
-	switch idt := id.(type) {
-	case *containerimage.ImageIdentifier:
-		if opt.ImageOpt == nil {
-			opt.ImageOpt = &sourceresolver.ResolveImageOpt{}
-		}
-		dgst, config, err := w.ImageSource.ResolveImageConfig(ctx, idt.Reference.String(), opt, sm, g)
-		if err != nil {
-			return nil, err
-		}
-		return &sourceresolver.MetaResponse{
-			Op: op,
-			Image: &sourceresolver.ResolveImageResponse{
-				Digest: dgst,
-				Config: config,
-			},
-		}, nil
-	}
-
-	return &sourceresolver.MetaResponse{
-		Op: op,
-	}, nil
-}
-
 // ResolveOp converts a LLB vertex into a LLB operation
 func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
 	if baseOp, ok := v.Sys().(*pb.Op); ok {
@@ -280,7 +236,7 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
 	}

 // ResolveImageConfig returns image config for an image
-func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
+func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
 	return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
 }

@@ -570,7 +526,3 @@ type emptyProvider struct{}
 func (p *emptyProvider) ReaderAt(ctx context.Context, dec ocispec.Descriptor) (content.ReaderAt, error) {
 	return nil, errors.Errorf("ReaderAt not implemented for empty provider")
 }
-
-func (p *emptyProvider) Info(ctx context.Context, d digest.Digest) (content.Info, error) {
-	return content.Info{}, errors.Errorf("Info not implemented for empty provider")
-}
@@ -64,7 +64,7 @@ type ExecBackend interface {
 	// ContainerRm removes a container specified by `id`.
 	ContainerRm(name string, config *backend.ContainerRmConfig) error
 	// ContainerStart starts a new container
-	ContainerStart(ctx context.Context, containerID string, checkpoint string, checkpointDir string) error
+	ContainerStart(ctx context.Context, containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
 	// ContainerWait stops processing until the given container is stopped.
 	ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
 }
@@ -72,7 +72,7 @@ func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr i
 		}
 	}()

-	if err := c.backend.ContainerStart(ctx, cID, "", ""); err != nil {
+	if err := c.backend.ContainerStart(ctx, cID, nil, "", ""); err != nil {
 		close(finished)
 		logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error())
 		return err
@@ -22,7 +22,7 @@ func normalizeWorkdir(_ string, current string, requested string) (string, error
 	if !filepath.IsAbs(requested) {
 		return filepath.Join(string(os.PathSeparator), current, requested), nil
 	}
-	return filepath.Clean(requested), nil
+	return requested, nil
 }

 // resolveCmdLine takes a command line arg set and optionally prepends a platform-specific
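The only behavioral difference in this hunk is whether an absolute WORKDIR is passed through verbatim or run through filepath.Clean first. A small illustration of what Clean does (standard-library behavior, shown here only for context):

	// filepath.Clean collapses duplicate separators and resolves "." and ".." segments.
	fmt.Println(filepath.Clean("/app/../data//logs/./"))
	// Output on a Unix-style path: /data/logs

Without the Clean call, a WORKDIR such as "/app//logs/../" is stored exactly as the user wrote it.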
@@ -15,13 +15,11 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/docker/runconfig"
 	"github.com/docker/go-connections/nat"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -379,21 +377,12 @@ func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConf
 		Ulimits: options.Ulimits,
 	}

-	// We need to make sure no empty string or "default" NetworkMode is
-	// provided to the daemon as it doesn't support them.
-	//
-	// This is in line with what the ContainerCreate API endpoint does.
-	networkMode := options.NetworkMode
-	if networkMode == "" || networkMode == network.NetworkDefault {
-		networkMode = runconfig.DefaultDaemonNetworkMode().NetworkName()
-	}
-
 	hc := &container.HostConfig{
 		SecurityOpt: options.SecurityOpt,
 		Isolation:   options.Isolation,
 		ShmSize:     options.ShmSize,
 		Resources:   resources,
-		NetworkMode: container.NetworkMode(networkMode),
+		NetworkMode: container.NetworkMode(options.NetworkMode),
 		// Set a log config to override any default value set on the daemon
 		LogConfig:  defaultLogConfig,
 		ExtraHosts: options.ExtraHosts,
@@ -46,7 +46,7 @@ func (m *MockBackend) CommitBuildStep(ctx context.Context, c backend.CommitConfi
 	return "", nil
 }

-func (m *MockBackend) ContainerStart(ctx context.Context, containerID string, checkpoint string, checkpointDir string) error {
+func (m *MockBackend) ContainerStart(ctx context.Context, containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error {
 	return nil
 }

@@ -10,11 +10,11 @@ import (
 )

 // DistributionInspect returns the image digest with the full manifest.
-func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedRegistryAuth string) (registry.DistributionInspect, error) {
+func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) {
 	// Contact the registry to retrieve digest and platform information
 	var distributionInspect registry.DistributionInspect
-	if imageRef == "" {
-		return distributionInspect, objectNotFoundError{object: "distribution", id: imageRef}
+	if image == "" {
+		return distributionInspect, objectNotFoundError{object: "distribution", id: image}
 	}

 	if err := cli.NewVersionError(ctx, "1.30", "distribution inspect"); err != nil {
@@ -28,7 +28,7 @@ func (cli *Client) DistributionInspect(ctx context.Context, imageRef, encodedReg
 		}
 	}

-	resp, err := cli.get(ctx, "/distribution/"+imageRef+"/json", url.Values{}, headers)
+	resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers)
 	defer ensureReaderClosed(resp)
 	if err != nil {
 		return distributionInspect, err
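Whichever parameter name is used, the method is called the same way from client code. A small usage sketch (assumes a reachable daemon and a public image; it is not part of this diff):

	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// An empty encodedRegistryAuth is fine for anonymous access to a public registry.
	info, err := cli.DistributionInspect(context.Background(), "docker.io/library/alpine:latest", "")
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Descriptor.Digest) // digest of the manifest the registry serves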
@@ -8,13 +8,13 @@ import (
 	"strings"

 	"github.com/distribution/reference"
-	"github.com/docker/docker/api/types/image"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/registry"
 )

 // ImageCreate creates a new image based on the parent options.
 // It returns the JSON content in the response body.
-func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) {
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) {
 	ref, err := reference.ParseNormalizedNamed(parentReference)
 	if err != nil {
 		return nil, err
@@ -9,7 +9,7 @@ import (
 	"strings"
 	"testing"

-	"github.com/docker/docker/api/types/image"
+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/registry"
 	"github.com/docker/docker/errdefs"
 	"gotest.tools/v3/assert"
@@ -20,7 +20,7 @@ func TestImageCreateError(t *testing.T) {
 	client := &Client{
 		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
 	}
-	_, err := client.ImageCreate(context.Background(), "reference", image.CreateOptions{})
+	_, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{})
 	assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
 }

@@ -58,7 +58,7 @@ func TestImageCreate(t *testing.T) {
 		}),
 	}

-	createResponse, err := client.ImageCreate(context.Background(), expectedReference, image.CreateOptions{
+	createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{
 		RegistryAuth: expectedRegistryAuth,
 	})
 	if err != nil {
@@ -8,12 +8,11 @@ import (

 	"github.com/distribution/reference"
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/image"
 )

 // ImageImport creates a new image based on the source options.
 // It returns the JSON content in the response body.
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) {
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) {
 	if ref != "" {
 		// Check if the given image name can be resolved
 		if _, err := reference.ParseNormalizedNamed(ref); err != nil {
@@ -11,7 +11,6 @@ import (
 	"testing"

 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/image"
 	"github.com/docker/docker/errdefs"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
@@ -21,7 +20,7 @@ func TestImageImportError(t *testing.T) {
 	client := &Client{
 		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
 	}
-	_, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", image.ImportOptions{})
+	_, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", types.ImageImportOptions{})
 	assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
 }

@@ -64,7 +63,7 @@ func TestImageImport(t *testing.T) {
 	importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{
 		Source:     strings.NewReader("source"),
 		SourceName: "image_source",
-	}, "repository_name:imported", image.ImportOptions{
+	}, "repository_name:imported", types.ImageImportOptions{
 		Tag:     "imported",
 		Message: "A message",
 		Changes: []string{"change1", "change2"},
@@ -5,13 +5,14 @@ import (
 	"encoding/json"
 	"net/url"

+	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/image"
 	"github.com/docker/docker/api/types/versions"
 )

 // ImageList returns a list of images in the docker host.
-func (cli *Client) ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error) {
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error) {
 	var images []image.Summary

 	// Make sure we negotiated (if the client is configured to do so),
@@ -11,6 +11,7 @@ import (
 "strings"
 "testing"

+"github.com/docker/docker/api/types"
 "github.com/docker/docker/api/types/filters"
 "github.com/docker/docker/api/types/image"
 "github.com/docker/docker/errdefs"

@@ -23,7 +24,7 @@ func TestImageListError(t *testing.T) {
 client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
 }

-_, err := client.ImageList(context.Background(), image.ListOptions{})
+_, err := client.ImageList(context.Background(), types.ImageListOptions{})
 assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
 }

@@ -35,7 +36,7 @@ func TestImageListConnectionError(t *testing.T) {
 client, err := NewClientWithOpts(WithAPIVersionNegotiation(), WithHost("tcp://no-such-host.invalid"))
 assert.NilError(t, err)

-_, err = client.ImageList(context.Background(), image.ListOptions{})
+_, err = client.ImageList(context.Background(), types.ImageListOptions{})
 assert.Check(t, is.ErrorType(err, IsErrConnectionFailed))
 }
@@ -43,11 +44,11 @@ func TestImageList(t *testing.T) {
 const expectedURL = "/images/json"

 listCases := []struct {
-options image.ListOptions
+options types.ImageListOptions
 expectedQueryParams map[string]string
 }{
 {
-options: image.ListOptions{},
+options: types.ImageListOptions{},
 expectedQueryParams: map[string]string{
 "all": "",
 "filter": "",

@@ -55,7 +56,7 @@ func TestImageList(t *testing.T) {
 },
 },
 {
-options: image.ListOptions{
+options: types.ImageListOptions{
 Filters: filters.NewArgs(
 filters.Arg("label", "label1"),
 filters.Arg("label", "label2"),

@@ -69,7 +70,7 @@ func TestImageList(t *testing.T) {
 },
 },
 {
-options: image.ListOptions{
+options: types.ImageListOptions{
 Filters: filters.NewArgs(filters.Arg("dangling", "false")),
 },
 expectedQueryParams: map[string]string{

@@ -152,7 +153,7 @@ func TestImageListApiBefore125(t *testing.T) {
 version: "1.24",
 }

-options := image.ListOptions{
+options := types.ImageListOptions{
 Filters: filters.NewArgs(filters.Arg("reference", "image:tag")),
 }

@@ -173,12 +174,12 @@ func TestImageListWithSharedSize(t *testing.T) {
 for _, tc := range []struct {
 name string
 version string
-options image.ListOptions
+options types.ImageListOptions
 sharedSize string // expected value for the shared-size query param, or empty if it should not be set.
 }{
 {name: "unset after 1.42, no options set", version: "1.42"},
-{name: "set after 1.42, if requested", version: "1.42", options: image.ListOptions{SharedSize: true}, sharedSize: "1"},
+{name: "set after 1.42, if requested", version: "1.42", options: types.ImageListOptions{SharedSize: true}, sharedSize: "1"},
-{name: "unset before 1.42, even if requested", version: "1.41", options: image.ListOptions{SharedSize: true}},
+{name: "unset before 1.42, even if requested", version: "1.41", options: types.ImageListOptions{SharedSize: true}},
 } {
 tc := tc
 t.Run(tc.name, func(t *testing.T) {
@@ -7,7 +7,7 @@ import (
 "strings"

 "github.com/distribution/reference"
-"github.com/docker/docker/api/types/image"
+"github.com/docker/docker/api/types"
 "github.com/docker/docker/errdefs"
 )

@@ -19,7 +19,7 @@ import (
 // FIXME(vdemeester): there is currently used in a few way in docker/docker
 // - if not in trusted content, ref is used to pass the whole reference, and tag is empty
 // - if in trusted content, ref is used to pass the reference name, and tag for the digest
-func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.PullOptions) (io.ReadCloser, error) {
+func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) {
 ref, err := reference.ParseNormalizedNamed(refStr)
 if err != nil {
 return nil, err

@@ -9,7 +9,7 @@ import (
 "strings"
 "testing"

-"github.com/docker/docker/api/types/image"
+"github.com/docker/docker/api/types"
 "github.com/docker/docker/api/types/registry"
 "github.com/docker/docker/errdefs"
 "gotest.tools/v3/assert"

@@ -23,7 +23,7 @@ func TestImagePullReferenceParseError(t *testing.T) {
 }),
 }
 // An empty reference is an invalid reference
-_, err := client.ImagePull(context.Background(), "", image.PullOptions{})
+_, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{})
 if err == nil || !strings.Contains(err.Error(), "invalid reference format") {
 t.Fatalf("expected an error, got %v", err)
 }

@@ -33,7 +33,7 @@ func TestImagePullAnyError(t *testing.T) {
 client := &Client{
 client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
 }
-_, err := client.ImagePull(context.Background(), "myimage", image.PullOptions{})
+_, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{})
 assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
 }

@@ -41,7 +41,7 @@ func TestImagePullStatusUnauthorizedError(t *testing.T) {
 client := &Client{
 client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")),
 }
-_, err := client.ImagePull(context.Background(), "myimage", image.PullOptions{})
+_, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{})
 assert.Check(t, is.ErrorType(err, errdefs.IsUnauthorized))
 }

@@ -52,7 +52,7 @@ func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) {
 privilegeFunc := func() (string, error) {
 return "", fmt.Errorf("Error requesting privilege")
 }
-_, err := client.ImagePull(context.Background(), "myimage", image.PullOptions{
+_, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{
 PrivilegeFunc: privilegeFunc,
 })
 if err == nil || err.Error() != "Error requesting privilege" {

@@ -67,7 +67,7 @@ func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T)
 privilegeFunc := func() (string, error) {
 return "a-auth-header", nil
 }
-_, err := client.ImagePull(context.Background(), "myimage", image.PullOptions{
+_, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{
 PrivilegeFunc: privilegeFunc,
 })
 assert.Check(t, is.ErrorType(err, errdefs.IsUnauthorized))

@@ -108,7 +108,7 @@ func TestImagePullWithPrivilegedFuncNoError(t *testing.T) {
 privilegeFunc := func() (string, error) {
 return "IAmValid", nil
 }
-resp, err := client.ImagePull(context.Background(), "myimage", image.PullOptions{
+resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{
 RegistryAuth: "NotValid",
 PrivilegeFunc: privilegeFunc,
 })

@@ -179,7 +179,7 @@ func TestImagePullWithoutErrors(t *testing.T) {
 }, nil
 }),
 }
-resp, err := client.ImagePull(context.Background(), pullCase.reference, image.PullOptions{
+resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{
 All: pullCase.all,
 })
 if err != nil {
@@ -8,7 +8,7 @@ import (
 "net/url"

 "github.com/distribution/reference"
-"github.com/docker/docker/api/types/image"
+"github.com/docker/docker/api/types"
 "github.com/docker/docker/api/types/registry"
 "github.com/docker/docker/errdefs"
 )

@@ -17,7 +17,7 @@ import (
 // It executes the privileged function if the operation is unauthorized
 // and it tries one more time.
 // It's up to the caller to handle the io.ReadCloser and close it properly.
-func (cli *Client) ImagePush(ctx context.Context, image string, options image.PushOptions) (io.ReadCloser, error) {
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) {
 ref, err := reference.ParseNormalizedNamed(image)
 if err != nil {
 return nil, err

@@ -9,7 +9,7 @@ import (
 "strings"
 "testing"

-"github.com/docker/docker/api/types/image"
+"github.com/docker/docker/api/types"
 "github.com/docker/docker/api/types/registry"
 "github.com/docker/docker/errdefs"
 "gotest.tools/v3/assert"

@@ -23,12 +23,12 @@ func TestImagePushReferenceError(t *testing.T) {
 }),
 }
 // An empty reference is an invalid reference
-_, err := client.ImagePush(context.Background(), "", image.PushOptions{})
+_, err := client.ImagePush(context.Background(), "", types.ImagePushOptions{})
 if err == nil || !strings.Contains(err.Error(), "invalid reference format") {
 t.Fatalf("expected an error, got %v", err)
 }
 // An canonical reference cannot be pushed
-_, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", image.PushOptions{})
+_, err = client.ImagePush(context.Background(), "repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", types.ImagePushOptions{})
 if err == nil || err.Error() != "cannot push a digest reference" {
 t.Fatalf("expected an error, got %v", err)
 }

@@ -38,7 +38,7 @@ func TestImagePushAnyError(t *testing.T) {
 client := &Client{
 client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
 }
-_, err := client.ImagePush(context.Background(), "myimage", image.PushOptions{})
+_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{})
 assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
 }

@@ -46,7 +46,7 @@ func TestImagePushStatusUnauthorizedError(t *testing.T) {
 client := &Client{
 client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")),
 }
-_, err := client.ImagePush(context.Background(), "myimage", image.PushOptions{})
+_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{})
 assert.Check(t, is.ErrorType(err, errdefs.IsUnauthorized))
 }

@@ -57,7 +57,7 @@ func TestImagePushWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) {
 privilegeFunc := func() (string, error) {
 return "", fmt.Errorf("Error requesting privilege")
 }
-_, err := client.ImagePush(context.Background(), "myimage", image.PushOptions{
+_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{
 PrivilegeFunc: privilegeFunc,
 })
 if err == nil || err.Error() != "Error requesting privilege" {

@@ -72,7 +72,7 @@ func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T)
 privilegeFunc := func() (string, error) {
 return "a-auth-header", nil
 }
-_, err := client.ImagePush(context.Background(), "myimage", image.PushOptions{
+_, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{
 PrivilegeFunc: privilegeFunc,
 })
 assert.Check(t, is.ErrorType(err, errdefs.IsUnauthorized))

@@ -109,7 +109,7 @@ func TestImagePushWithPrivilegedFuncNoError(t *testing.T) {
 privilegeFunc := func() (string, error) {
 return "IAmValid", nil
 }
-resp, err := client.ImagePush(context.Background(), "myimage:tag", image.PushOptions{
+resp, err := client.ImagePush(context.Background(), "myimage:tag", types.ImagePushOptions{
 RegistryAuth: "NotValid",
 PrivilegeFunc: privilegeFunc,
 })

@@ -179,7 +179,7 @@ func TestImagePushWithoutErrors(t *testing.T) {
 }, nil
 }),
 }
-resp, err := client.ImagePush(context.Background(), tc.reference, image.PushOptions{
+resp, err := client.ImagePush(context.Background(), tc.reference, types.ImagePushOptions{
 All: tc.all,
 })
 if err != nil {
@@ -5,11 +5,12 @@ import (
 "encoding/json"
 "net/url"

+"github.com/docker/docker/api/types"
 "github.com/docker/docker/api/types/image"
 )

 // ImageRemove removes an image from the docker host.
-func (cli *Client) ImageRemove(ctx context.Context, imageID string, options image.RemoveOptions) ([]image.DeleteResponse, error) {
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error) {
 query := url.Values{}

 if options.Force {

@@ -10,6 +10,7 @@ import (
 "strings"
 "testing"

+"github.com/docker/docker/api/types"
 "github.com/docker/docker/api/types/image"
 "github.com/docker/docker/errdefs"
 "gotest.tools/v3/assert"

@@ -21,7 +22,7 @@ func TestImageRemoveError(t *testing.T) {
 client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
 }

-_, err := client.ImageRemove(context.Background(), "image_id", image.RemoveOptions{})
+_, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{})
 assert.Check(t, is.ErrorType(err, errdefs.IsSystem))
 }

@@ -30,7 +31,7 @@ func TestImageRemoveImageNotFound(t *testing.T) {
 client: newMockClient(errorMock(http.StatusNotFound, "no such image: unknown")),
 }

-_, err := client.ImageRemove(context.Background(), "unknown", image.RemoveOptions{})
+_, err := client.ImageRemove(context.Background(), "unknown", types.ImageRemoveOptions{})
 assert.Check(t, is.ErrorContains(err, "no such image: unknown"))
 assert.Check(t, is.ErrorType(err, errdefs.IsNotFound))
 }

@@ -92,7 +93,7 @@ func TestImageRemove(t *testing.T) {
 }, nil
 }),
 }
-imageDeletes, err := client.ImageRemove(context.Background(), "image_id", image.RemoveOptions{
+imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{
 Force: removeCase.force,
 PruneChildren: removeCase.pruneChildren,
 })
@@ -90,15 +90,15 @@ type ImageAPIClient interface {
 ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
 BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
 BuildCancel(ctx context.Context, id string) error
-ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error)
+ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
 ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
-ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error)
+ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)
 ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error)
-ImageList(ctx context.Context, options image.ListOptions) ([]image.Summary, error)
+ImageList(ctx context.Context, options types.ImageListOptions) ([]image.Summary, error)
 ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error)
-ImagePull(ctx context.Context, ref string, options image.PullOptions) (io.ReadCloser, error)
+ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error)
-ImagePush(ctx context.Context, ref string, options image.PushOptions) (io.ReadCloser, error)
+ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error)
-ImageRemove(ctx context.Context, image string, options image.RemoveOptions) ([]image.DeleteResponse, error)
+ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]image.DeleteResponse, error)
 ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error)
 ImageSave(ctx context.Context, images []string) (io.ReadCloser, error)
 ImageTag(ctx context.Context, image, ref string) error
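For orientation, the hunk above only changes which package the option structs come from; a minimal, hypothetical caller (assuming a reachable daemon and the right-hand struct names) looks like this sketch:

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/api/types"
        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // ImageList is one of the ImageAPIClient methods listed in the hunk above.
        images, err := cli.ImageList(context.Background(), types.ImageListOptions{All: true})
        if err != nil {
            panic(err)
        }
        for _, img := range images {
            fmt.Println(img.ID, img.RepoTags)
        }
    }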
@@ -15,7 +15,7 @@ import (
 )

 var (
-testBuf = []byte("Buffalo1 buffalo2 Buffalo3 buffalo4 buffalo5 buffalo6 Buffalo7 buffalo8")
+testBuf = []byte("Buffalo buffalo Buffalo buffalo buffalo buffalo Buffalo buffalo")
 testBufSize = len(testBuf)
 )
@@ -45,6 +45,9 @@ func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) error {
 flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers")
 flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces")
 flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running")
+// TODO(thaJeztah): Used to produce a deprecation error; remove the flag and the OOMScoreAdjust field for the next release after v25.0.0.
+flags.IntVar(&conf.OOMScoreAdjust, "oom-score-adjust", 0, "Set the oom_score_adj for the daemon (deprecated)")
+_ = flags.MarkDeprecated("oom-score-adjust", "and will be removed in the next release.")
 flags.BoolVar(&conf.Init, "init", false, "Run an init in the container to forward signals and reap processes")
 flags.StringVar(&conf.InitPath, "init-path", "", "Path to the docker-init binary")
 flags.Int64Var(&conf.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds for the parent cgroup for all containers (not supported with cgroups v2)")
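The added lines lean on pflag's deprecation support; a small, self-contained sketch of that behaviour (the flag set name and parsed values here are illustrative, not from the patch):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
        oom := flags.Int("oom-score-adjust", 0, "Set the oom_score_adj for the daemon (deprecated)")

        // MarkDeprecated keeps the flag parseable but hides it from help output
        // and prints the deprecation message whenever the flag is set.
        _ = flags.MarkDeprecated("oom-score-adjust", "and will be removed in the next release.")

        _ = flags.Parse([]string{"--oom-score-adjust=-500"})
        fmt.Println(*oom) // -500, with a deprecation warning printed during Parse
    }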
@@ -242,7 +242,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {

 // Override BuildKit's default Resource so that it matches the semconv
 // version that is used in our code.
-detect.OverrideResource(resource.Default())
+detect.Resource = resource.Default()
 detect.Recorder = detect.NewTraceRecorder()

 tp, err := detect.TracerProvider()

@@ -256,10 +256,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 pluginStore := plugin.NewStore()

 var apiServer apiserver.Server
-cli.authzMiddleware, err = initMiddlewares(&apiServer, cli.Config, pluginStore)
+cli.authzMiddleware = initMiddlewares(&apiServer, cli.Config, pluginStore)
-if err != nil {
-return errors.Wrap(err, "failed to start API server")
-}

 d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore, cli.authzMiddleware)
 if err != nil {
@@ -310,12 +307,14 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 //
 // FIXME(thaJeztah): better separate runtime and config data?
 daemonCfg := d.Config()
-routerOpts, err := newRouterOptions(routerCtx, &daemonCfg, d, c)
+routerOptions, err := newRouterOptions(routerCtx, &daemonCfg, d)
 if err != nil {
 return err
 }

-httpServer.Handler = apiServer.CreateMux(routerOpts.Build()...)
+routerOptions.cluster = c
+
+httpServer.Handler = apiServer.CreateMux(routerOptions.Build()...)

 go d.ProcessClusterNotifications(ctx, c.GetWatchStream())

@@ -357,7 +356,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 notifyStopping()
 shutdownDaemon(ctx, d)

-if err := routerOpts.buildkit.Close(); err != nil {
+if err := routerOptions.buildkit.Close(); err != nil {
 log.G(ctx).WithError(err).Error("Failed to close buildkit")
 }
@@ -380,20 +379,12 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 // TODO: This can be removed after buildkit is updated to use http/protobuf as the default.
 func setOTLPProtoDefault() {
 const (
 tracesEnv = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"
-metricsEnv = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL"
-protoEnv = "OTEL_EXPORTER_OTLP_PROTOCOL"
-
-defaultProto = "http/protobuf"
+protoEnv = "OTEL_EXPORTER_OTLP_PROTOCOL"
 )

-if os.Getenv(protoEnv) == "" {
-if os.Getenv(tracesEnv) == "" {
-os.Setenv(tracesEnv, defaultProto)
-}
-if os.Getenv(metricsEnv) == "" {
-os.Setenv(metricsEnv, defaultProto)
-}
+if os.Getenv(tracesEnv) == "" && os.Getenv(protoEnv) == "" {
+os.Setenv(tracesEnv, "http/protobuf")
 }
 }
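The reworked guard above only applies the http/protobuf default when neither the traces-specific variable nor the generic OTLP protocol variable is already set. A stand-alone illustration of that pattern (the helper name is mine, not part of the patch):

    package main

    import (
        "fmt"
        "os"
    )

    // setDefaultIfUnset applies val to key only when neither key itself nor the
    // more generic fallback variable has been set by the user.
    func setDefaultIfUnset(key, fallbackKey, val string) {
        if os.Getenv(key) == "" && os.Getenv(fallbackKey) == "" {
            os.Setenv(key, val)
        }
    }

    func main() {
        setDefaultIfUnset("OTEL_EXPORTER_OTLP_TRACES_PROTOCOL", "OTEL_EXPORTER_OTLP_PROTOCOL", "http/protobuf")
        fmt.Println(os.Getenv("OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"))
    }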
@@ -406,17 +397,23 @@ type routerOptions struct {
 cluster *cluster.Cluster
 }

-func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daemon, c *cluster.Cluster) (routerOptions, error) {
+func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daemon) (routerOptions, error) {
+opts := routerOptions{}
 sm, err := session.NewManager()
 if err != nil {
-return routerOptions{}, errors.Wrap(err, "failed to create sessionmanager")
+return opts, errors.Wrap(err, "failed to create sessionmanager")
 }

 manager, err := dockerfile.NewBuildManager(d.BuilderBackend(), d.IdentityMapping())
 if err != nil {
-return routerOptions{}, err
+return opts, err
 }
 cgroupParent := newCgroupParent(config)
+ro := routerOptions{
+sessionManager: sm,
+features: d.Features,
+daemon: d,
+}

 bk, err := buildkit.New(ctx, buildkit.Opt{
 SessionManager: sm,
@@ -438,22 +435,18 @@ func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daem
 ContainerdNamespace: config.ContainerdNamespace,
 })
 if err != nil {
-return routerOptions{}, err
+return opts, err
 }

 bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService)
 if err != nil {
-return routerOptions{}, errors.Wrap(err, "failed to create buildmanager")
+return opts, errors.Wrap(err, "failed to create buildmanager")
 }

-return routerOptions{
-sessionManager: sm,
-buildBackend: bb,
-features: d.Features,
-buildkit: bk,
-daemon: d,
-cluster: c,
-}, nil
+ro.buildBackend = bb
+ro.buildkit = bk
+
+return ro, nil
 }

 func (cli *DaemonCli) reloadConfig() {
@@ -629,10 +622,6 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
 conf.CDISpecDirs = nil
 }

-if err := loadCLIPlatformConfig(conf); err != nil {
-return nil, err
-}
-
 return conf, nil
 }
@@ -719,15 +708,14 @@ func (opts routerOptions) Build() []router.Router {
 return routers
 }

-func initMiddlewares(s *apiserver.Server, cfg *config.Config, pluginStore plugingetter.PluginGetter) (*authorization.Middleware, error) {
+func initMiddlewares(s *apiserver.Server, cfg *config.Config, pluginStore plugingetter.PluginGetter) *authorization.Middleware {
+v := dockerversion.Version

 exp := middleware.NewExperimentalMiddleware(cfg.Experimental)
 s.UseMiddleware(exp)

-vm, err := middleware.NewVersionMiddleware(dockerversion.Version, api.DefaultVersion, cfg.MinAPIVersion)
-if err != nil {
-return nil, err
-}
-s.UseMiddleware(*vm)
+vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, cfg.MinAPIVersion)
+s.UseMiddleware(vm)

 if cfg.CorsHeaders != "" {
 c := middleware.NewCORSMiddleware(cfg.CorsHeaders)

@@ -736,7 +724,7 @@ func initMiddlewares(s *apiserver.Server, cfg *config.Config, pluginStore plugin

 authzMiddleware := authorization.NewMiddleware(cfg.AuthorizationPlugins, pluginStore)
 s.UseMiddleware(authzMiddleware)
-return authzMiddleware, nil
+return authzMiddleware
 }

 func (cli *DaemonCli) getContainerdDaemonOpts() ([]supervisor.DaemonOpt, error) {
@@ -844,7 +832,6 @@ func loadListeners(cfg *config.Config, tlsConfig *tls.Config) ([]net.Listener, [
 if proto == "tcp" && !authEnabled {
 log.G(ctx).WithField("host", protoAddr).Warn("Binding to IP address without --tlsverify is insecure and gives root access on this machine to everyone who has access to your network.")
 log.G(ctx).WithField("host", protoAddr).Warn("Binding to an IP address, even on localhost, can also give access to scripts run in a browser. Be safe out there!")
-log.G(ctx).WithField("host", protoAddr).Warn("[DEPRECATION NOTICE] In future versions this will be a hard failure preventing the daemon from starting! Learn more at: https://docs.docker.com/go/api-security/")
 time.Sleep(time.Second)

 // If TLSVerify is explicitly set to false we'll take that as "Please let me shoot myself in the foot"
@@ -3,28 +3,11 @@ package main
 import (
 cdcgroups "github.com/containerd/cgroups/v3"
 systemdDaemon "github.com/coreos/go-systemd/v22/daemon"
-"github.com/docker/docker/daemon"
 "github.com/docker/docker/daemon/config"
 "github.com/docker/docker/pkg/sysinfo"
 "github.com/pkg/errors"
 )

-// loadCLIPlatformConfig loads the platform specific CLI configuration
-func loadCLIPlatformConfig(conf *config.Config) error {
-if conf.RemappedRoot == "" {
-return nil
-}
-
-containerdNamespace, containerdPluginNamespace, err := daemon.RemapContainerdNamespaces(conf)
-if err != nil {
-return err
-}
-conf.ContainerdNamespace = containerdNamespace
-conf.ContainerdPluginNamespace = containerdPluginNamespace
-
-return nil
-}
-
 // preNotifyReady sends a message to the host when the API is active, but before the daemon is
 func preNotifyReady() {
 }
@@ -16,12 +16,6 @@ func getDefaultDaemonConfigFile() (string, error) {
 return "", nil
 }

-// loadCLIPlatformConfig loads the platform specific CLI configuration
-// there is none on windows, so this is a no-op
-func loadCLIPlatformConfig(conf *config.Config) error {
-return nil
-}
-
 // setDefaultUmask doesn't do anything on windows
 func setDefaultUmask() error {
 return nil
@@ -5,7 +5,7 @@ import (
 "sync"
 )

-// attachContext is the context used for attach calls.
+// attachContext is the context used for for attach calls.
 type attachContext struct {
 mu sync.Mutex
 ctx context.Context

@@ -299,8 +299,8 @@ func (container *Container) SetupWorkingDirectory(rootIdentity idtools.Identity)
 return nil
 }

-workdir := filepath.Clean(container.Config.WorkingDir)
-pth, err := container.GetResourcePath(workdir)
+container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir)
+pth, err := container.GetResourcePath(container.Config.WorkingDir)
 if err != nil {
 return err
 }

@@ -514,14 +514,14 @@ func (container *Container) AddMountPointWithVolume(destination string, vol volu
 }

 // UnmountVolumes unmounts all volumes
-func (container *Container) UnmountVolumes(ctx context.Context, volumeEventLog func(name string, action events.Action, attributes map[string]string)) error {
+func (container *Container) UnmountVolumes(volumeEventLog func(name string, action events.Action, attributes map[string]string)) error {
 var errs []string
 for _, volumeMount := range container.MountPoints {
 if volumeMount.Volume == nil {
 continue
 }

-if err := volumeMount.Cleanup(ctx); err != nil {
+if err := volumeMount.Cleanup(); err != nil {
 errs = append(errs, err.Error())
 continue
 }
@@ -15,6 +15,8 @@ import (
 "github.com/docker/docker/api/types/events"
 mounttypes "github.com/docker/docker/api/types/mount"
 swarmtypes "github.com/docker/docker/api/types/swarm"
+"github.com/docker/docker/pkg/stringid"
+"github.com/docker/docker/volume"
 volumemounts "github.com/docker/docker/volume/mounts"
 "github.com/moby/sys/mount"
 "github.com/opencontainers/selinux/go-selinux/label"

@@ -127,11 +129,34 @@ func (container *Container) NetworkMounts() []Mount {
 }

 // CopyImagePathContent copies files in destination to the volume.
-func (container *Container) CopyImagePathContent(volumePath, destination string) error {
+func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
-if err := label.Relabel(volumePath, container.MountLabel, true); err != nil && !errors.Is(err, syscall.ENOTSUP) {
+rootfs, err := container.GetResourcePath(destination)
+if err != nil {
 return err
 }
-return copyExistingContents(destination, volumePath)
+
+if _, err := os.Stat(rootfs); err != nil {
+if os.IsNotExist(err) {
+return nil
+}
+return err
+}
+
+id := stringid.GenerateRandomID()
+path, err := v.Mount(id)
+if err != nil {
+return err
+}
+
+defer func() {
+if err := v.Unmount(id); err != nil {
+log.G(context.TODO()).Warnf("error while unmounting volume %s: %v", v.Name(), err)
+}
+}()
+if err := label.Relabel(path, container.MountLabel, true); err != nil && !errors.Is(err, syscall.ENOTSUP) {
+return err
+}
+return copyExistingContents(rootfs, path)
 }

 // ShmResourcePath returns path to shm
@@ -371,7 +396,7 @@ func (container *Container) DetachAndUnmount(volumeEventLog func(name string, ac
 Warn("Unable to unmount")
 }
 }
-return container.UnmountVolumes(ctx, volumeEventLog)
+return container.UnmountVolumes(volumeEventLog)
 }

 // ignoreUnsupportedXAttrs ignores errors when extended attributes

@@ -394,13 +419,9 @@ func copyExistingContents(source, destination string) error {
 return err
 }
 if len(dstList) != 0 {
-log.G(context.TODO()).WithFields(log.Fields{
-"source": source,
-"destination": destination,
-}).Debug("destination is not empty, do not copy")
+// destination is not empty, do not copy
 return nil
 }

 return fs.CopyDir(destination, source, ignoreUnsupportedXAttrs())
 }
@@ -1,7 +1,6 @@
 package container // import "github.com/docker/docker/container"

 import (
-"context"
 "fmt"
 "os"
 "path/filepath"

@@ -129,7 +128,7 @@ func (container *Container) ConfigMounts() []Mount {
 // On Windows it only delegates to `UnmountVolumes` since there is nothing to
 // force unmount.
 func (container *Container) DetachAndUnmount(volumeEventLog func(name string, action events.Action, attributes map[string]string)) error {
-return container.UnmountVolumes(context.TODO(), volumeEventLog)
+return container.UnmountVolumes(volumeEventLog)
 }

 // TmpfsMounts returns the list of tmpfs mounts
@@ -15,7 +15,6 @@
 # * DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER=(builtin|slirp4netns|implicit): the rootlesskit port driver. Defaults to "builtin".
 # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX=(auto|true|false): whether to protect slirp4netns with a dedicated mount namespace. Defaults to "auto".
 # * DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP=(auto|true|false): whether to protect slirp4netns with seccomp. Defaults to "auto".
-# * DOCKERD_ROOTLESS_ROOTLESSKIT_DISABLE_HOST_LOOPBACK=(true|false): prohibit connections to 127.0.0.1 on the host (including via 10.0.2.2, in the case of slirp4netns). Defaults to "true".

 # To apply an environment variable via systemd, create ~/.config/systemd/user/docker.service.d/override.conf as follows,
 # and run `systemctl --user daemon-reload && systemctl --user restart docker`:

@@ -72,7 +71,6 @@ fi
 : "${DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER:=builtin}"
 : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX:=auto}"
 : "${DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP:=auto}"
-: "${DOCKERD_ROOTLESS_ROOTLESSKIT_DISABLE_HOST_LOOPBACK:=}"
 net=$DOCKERD_ROOTLESS_ROOTLESSKIT_NET
 mtu=$DOCKERD_ROOTLESS_ROOTLESSKIT_MTU
 if [ -z "$net" ]; then

@@ -100,11 +98,6 @@ if [ -z "$mtu" ]; then
 mtu=1500
 fi

-host_loopback="--disable-host-loopback"
-if [ "$DOCKERD_ROOTLESS_ROOTLESSKIT_DISABLE_HOST_LOOPBACK" = "false" ]; then
-host_loopback=""
-fi
-
 dockerd="${DOCKERD:-dockerd}"

 if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then

@@ -132,7 +125,7 @@ if [ -z "$_DOCKERD_ROOTLESS_CHILD" ]; then
 --net=$net --mtu=$mtu \
 --slirp4netns-sandbox=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SANDBOX \
 --slirp4netns-seccomp=$DOCKERD_ROOTLESS_ROOTLESSKIT_SLIRP4NETNS_SECCOMP \
-$host_loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
+--disable-host-loopback --port-driver=$DOCKERD_ROOTLESS_ROOTLESSKIT_PORT_DRIVER \
 --copy-up=/etc --copy-up=/run \
 --propagation=rslave \
 $DOCKERD_ROOTLESS_ROOTLESSKIT_FLAGS \
@@ -8,6 +8,25 @@ import (
 "github.com/docker/docker/errdefs"
 )

+// ContainerCopy performs a deprecated operation of archiving the resource at
+// the specified path in the container identified by the given name.
+func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
+ctr, err := daemon.GetContainer(name)
+if err != nil {
+return nil, err
+}
+
+data, err := daemon.containerCopy(ctr, res)
+if err == nil {
+return data, nil
+}
+
+if os.IsNotExist(err) {
+return nil, containerFileNotFound{res, name}
+}
+return nil, errdefs.System(err)
+}
+
 // ContainerStatPath stats the filesystem resource at the specified path in the
 // container identified by the given name.
 func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
@@ -161,6 +161,55 @@ func (daemon *Daemon) containerExtractToDir(container *container.Container, path
 return nil
 }

+func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) {
+container.Lock()
+
+defer func() {
+if err != nil {
+// Wait to unlock the container until the archive is fully read
+// (see the ReadCloseWrapper func below) or if there is an error
+// before that occurs.
+container.Unlock()
+}
+}()
+
+cfs, err := daemon.openContainerFS(container)
+if err != nil {
+return nil, err
+}
+defer func() {
+if err != nil {
+cfs.Close()
+}
+}()
+
+err = cfs.RunInFS(context.TODO(), func() error {
+_, err := os.Stat(resource)
+return err
+})
+if err != nil {
+return nil, err
+}
+
+tb, err := archive.NewTarballer(resource, &archive.TarOptions{
+Compression: archive.Uncompressed,
+})
+if err != nil {
+return nil, err
+}
+
+cfs.GoInFS(context.TODO(), tb.Do)
+archv := tb.Reader()
+reader := ioutils.NewReadCloserWrapper(archv, func() error {
+err := archv.Close()
+_ = cfs.Close()
+container.Unlock()
+return err
+})
+daemon.LogContainerEvent(container, events.ActionCopy)
+return reader, nil
+}
+
 // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
 // cannot be in a read-only volume. If it is not in a volume, the container
 // cannot be configured with a read-only rootfs.
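The added containerCopy holds the container lock until the returned archive has been read to completion, by pushing the unlock into the reader's Close. A stand-alone sketch of that wrapper pattern (the reader content and cleanup here are made up for illustration):

    package main

    import (
        "fmt"
        "io"
        "strings"

        "github.com/docker/docker/pkg/ioutils"
    )

    func main() {
        src := strings.NewReader("archive payload")

        // The cleanup callback runs only when the caller closes the wrapper,
        // which is how containerCopy defers unlocking until the read is done.
        rc := ioutils.NewReadCloserWrapper(src, func() error {
            fmt.Println("cleanup after the reader is closed")
            return nil
        })

        _, _ = io.Copy(io.Discard, rc)
        _ = rc.Close()
    }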
@@ -120,9 +120,8 @@ func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec {

 if m.VolumeOptions != nil {
 mount.VolumeOptions = &mounttypes.VolumeOptions{
 NoCopy: m.VolumeOptions.NoCopy,
 Labels: m.VolumeOptions.Labels,
-Subpath: m.VolumeOptions.Subpath,
 }
 if m.VolumeOptions.DriverConfig != nil {
 mount.VolumeOptions.DriverConfig = &mounttypes.Driver{

@@ -407,9 +406,8 @@ func containerToGRPC(c *types.ContainerSpec) (*swarmapi.ContainerSpec, error) {

 if m.VolumeOptions != nil {
 mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{
 NoCopy: m.VolumeOptions.NoCopy,
 Labels: m.VolumeOptions.Labels,
-Subpath: m.VolumeOptions.Subpath,
 }
 if m.VolumeOptions.DriverConfig != nil {
 mount.VolumeOptions.DriverConfig = &swarmapi.Driver{
@@ -1,42 +0,0 @@
-package convert
-
-import (
-"github.com/docker/docker/pkg/plugingetter"
-"github.com/moby/swarmkit/v2/node/plugin"
-)
-
-// SwarmPluginGetter adapts a plugingetter.PluginGetter to a Swarmkit plugin.Getter.
-func SwarmPluginGetter(pg plugingetter.PluginGetter) plugin.Getter {
-return pluginGetter{pg}
-}
-
-type pluginGetter struct {
-pg plugingetter.PluginGetter
-}
-
-var _ plugin.Getter = (*pluginGetter)(nil)
-
-type swarmPlugin struct {
-plugingetter.CompatPlugin
-}
-
-func (p swarmPlugin) Client() plugin.Client {
-return p.CompatPlugin.Client()
-}
-
-func (g pluginGetter) Get(name string, capability string) (plugin.Plugin, error) {
-p, err := g.pg.Get(name, capability, plugingetter.Lookup)
-if err != nil {
-return nil, err
-}
-return swarmPlugin{p}, nil
-}
-
-func (g pluginGetter) GetAllManagedPluginsByCap(capability string) []plugin.Plugin {
-pp := g.pg.GetAllManagedPluginsByCap(capability)
-ret := make([]plugin.Plugin, len(pp))
-for i, p := range pp {
-ret[i] = swarmPlugin{p}
-}
-return ret
-}
@@ -4,13 +4,11 @@ import (
 "testing"

 containertypes "github.com/docker/docker/api/types/container"
-"github.com/docker/docker/api/types/mount"
 swarmtypes "github.com/docker/docker/api/types/swarm"
 "github.com/docker/docker/api/types/swarm/runtime"
 google_protobuf3 "github.com/gogo/protobuf/types"
 swarmapi "github.com/moby/swarmkit/v2/api"
 "gotest.tools/v3/assert"
-is "gotest.tools/v3/assert/cmp"
 )

 func TestServiceConvertFromGRPCRuntimeContainer(t *testing.T) {

@@ -111,11 +109,11 @@ func TestServiceConvertToGRPCGenericRuntimePlugin(t *testing.T) {
 }

 func TestServiceConvertToGRPCContainerRuntime(t *testing.T) {
-const imgName = "alpine:latest"
+image := "alpine:latest"
 s := swarmtypes.ServiceSpec{
 TaskTemplate: swarmtypes.TaskSpec{
 ContainerSpec: &swarmtypes.ContainerSpec{
-Image: imgName,
+Image: image,
 },
 },
 Mode: swarmtypes.ServiceMode{

@@ -133,8 +131,8 @@ func TestServiceConvertToGRPCContainerRuntime(t *testing.T) {
 t.Fatal("expected type swarmapi.TaskSpec_Container")
 }

-if v.Container.Image != imgName {
-t.Fatalf("expected image %s; received %s", imgName, v.Container.Image)
+if v.Container.Image != image {
+t.Fatalf("expected image %s; received %s", image, v.Container.Image)
 }
 }

@@ -613,32 +611,3 @@ func TestServiceConvertToGRPCConfigs(t *testing.T) {
 })
 }
 }
-
-func TestServiceConvertToGRPCVolumeSubpath(t *testing.T) {
-s := swarmtypes.ServiceSpec{
-TaskTemplate: swarmtypes.TaskSpec{
-ContainerSpec: &swarmtypes.ContainerSpec{
-Mounts: []mount.Mount{
-{
-Source: "/foo/bar",
-Target: "/baz",
-Type: mount.TypeVolume,
-ReadOnly: false,
-VolumeOptions: &mount.VolumeOptions{
-Subpath: "sub",
-},
-},
-},
-},
-},
-}
-
-g, err := ServiceSpecToGRPC(s)
-assert.NilError(t, err)
-
-v, ok := g.Task.Runtime.(*swarmapi.TaskSpec_Container)
-assert.Assert(t, ok)
-
-assert.Check(t, is.Len(v.Container.Mounts, 1))
-assert.Check(t, is.Equal(v.Container.Mounts[0].VolumeOptions.Subpath, "sub"))
-}
@@ -12,6 +12,7 @@ import (
     "github.com/docker/docker/api/types/container"
     "github.com/docker/docker/api/types/events"
     "github.com/docker/docker/api/types/filters"
+    opts "github.com/docker/docker/api/types/image"
     "github.com/docker/docker/api/types/network"
     "github.com/docker/docker/api/types/registry"
     "github.com/docker/docker/api/types/swarm"
@@ -38,7 +39,7 @@ type Backend interface {
     SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error)
     ReleaseIngress() (<-chan struct{}, error)
     CreateManagedContainer(ctx context.Context, config backend.ContainerCreateConfig) (container.CreateResponse, error)
-    ContainerStart(ctx context.Context, name string, checkpoint string, checkpointDir string) error
+    ContainerStart(ctx context.Context, name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
     ContainerStop(ctx context.Context, name string, config container.StopOptions) error
     ContainerLogs(ctx context.Context, name string, config *container.LogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error)
     ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error
@@ -77,5 +78,5 @@ type VolumeBackend interface {
 type ImageBackend interface {
     PullImage(ctx context.Context, ref reference.Named, platform *ocispec.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
     GetRepositories(context.Context, reference.Named, *registry.AuthConfig) ([]distribution.Repository, error)
-    GetImage(ctx context.Context, refOrID string, options backend.GetImageOpts) (*image.Image, error)
+    GetImage(ctx context.Context, refOrID string, options opts.GetImageOpts) (*image.Image, error)
 }
@@ -17,14 +17,13 @@ import (
     "github.com/docker/docker/api/types/backend"
     containertypes "github.com/docker/docker/api/types/container"
     "github.com/docker/docker/api/types/events"
-    "github.com/docker/docker/api/types/network"
+    imagetypes "github.com/docker/docker/api/types/image"
     "github.com/docker/docker/api/types/registry"
     containerpkg "github.com/docker/docker/container"
     "github.com/docker/docker/daemon"
     "github.com/docker/docker/daemon/cluster/convert"
     executorpkg "github.com/docker/docker/daemon/cluster/executor"
     "github.com/docker/docker/libnetwork"
-    "github.com/docker/docker/runconfig"
     volumeopts "github.com/docker/docker/volume/service/opts"
     gogotypes "github.com/gogo/protobuf/types"
     "github.com/moby/swarmkit/v2/agent/exec"
@@ -77,7 +76,7 @@ func (c *containerAdapter) pullImage(ctx context.Context) error {
     named, err := reference.ParseNormalizedNamed(spec.Image)
     if err == nil {
         if _, ok := named.(reference.Canonical); ok {
-            _, err := c.imageBackend.GetImage(ctx, spec.Image, backend.GetImageOpts{})
+            _, err := c.imageBackend.GetImage(ctx, spec.Image, imagetypes.GetImageOpts{})
             if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
                 return err
             }
@@ -292,34 +291,14 @@ func (c *containerAdapter) waitForDetach(ctx context.Context) error {
 }
 
 func (c *containerAdapter) create(ctx context.Context) error {
-    hostConfig := c.container.hostConfig(c.dependencies.Volumes())
-    netConfig := c.container.createNetworkingConfig(c.backend)
-
-    // We need to make sure no empty string or "default" NetworkMode is
-    // provided to the daemon as it doesn't support them.
-    //
-    // This is in line with what the ContainerCreate API endpoint does, but
-    // unlike that endpoint we can't do that in the ServiceCreate endpoint as
-    // the cluster leader and the current node might not be running on the same
-    // OS. Since the normalized value isn't the same on Windows and Linux, we
-    // need to make this normalization happen once we're sure we won't make a
-    // cross-OS API call.
-    if hostConfig.NetworkMode == "" || hostConfig.NetworkMode.IsDefault() {
-        hostConfig.NetworkMode = runconfig.DefaultDaemonNetworkMode()
-        if v, ok := netConfig.EndpointsConfig[network.NetworkDefault]; ok {
-            delete(netConfig.EndpointsConfig, network.NetworkDefault)
-            netConfig.EndpointsConfig[runconfig.DefaultDaemonNetworkMode().NetworkName()] = v
-        }
-    }
-
     var cr containertypes.CreateResponse
     var err error
     if cr, err = c.backend.CreateManagedContainer(ctx, backend.ContainerCreateConfig{
         Name:   c.container.name(),
         Config: c.container.config(),
-        HostConfig: hostConfig,
+        HostConfig: c.container.hostConfig(c.dependencies.Volumes()),
         // Use the first network in container create
-        NetworkingConfig: netConfig,
+        NetworkingConfig: c.container.createNetworkingConfig(c.backend),
     }); err != nil {
         return err
     }
@@ -369,7 +348,7 @@ func (c *containerAdapter) start(ctx context.Context) error {
         return err
     }
 
-    return c.backend.ContainerStart(ctx, c.container.name(), "", "")
+    return c.backend.ContainerStart(ctx, c.container.name(), nil, "", "")
 }
 
 func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) {
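The comment removed from create() above explains why the executor normalizes an empty or "default" NetworkMode before issuing the local ContainerCreate call. A minimal sketch of that normalization as a stand-alone helper (a hypothetical function built on the public API types; in the daemon the platform default comes from runconfig.DefaultDaemonNetworkMode(), here it is passed in as a parameter):

```go
package main

import (
	"fmt"

	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/network"
)

// normalizeNetworkMode rewrites an empty or "default" NetworkMode to the
// platform default and re-keys any endpoint settings stored under "default".
func normalizeNetworkMode(hc *containertypes.HostConfig, nc *network.NetworkingConfig, platformDefault containertypes.NetworkMode) {
	if hc.NetworkMode == "" || hc.NetworkMode.IsDefault() {
		hc.NetworkMode = platformDefault
		if v, ok := nc.EndpointsConfig["default"]; ok {
			delete(nc.EndpointsConfig, "default")
			nc.EndpointsConfig[platformDefault.NetworkName()] = v
		}
	}
}

func main() {
	hc := &containertypes.HostConfig{NetworkMode: "default"}
	nc := &network.NetworkingConfig{EndpointsConfig: map[string]*network.EndpointSettings{"default": {}}}
	normalizeNetworkMode(hc, nc, "bridge") // "bridge" assumed as the Linux default for this sketch
	fmt.Println(hc.NetworkMode, len(nc.EndpointsConfig)) // bridge 1
}
```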
@@ -338,8 +338,7 @@ func convertMount(m api.Mount) enginemount.Mount {
 
     if m.VolumeOptions != nil {
         mount.VolumeOptions = &enginemount.VolumeOptions{
             NoCopy: m.VolumeOptions.NoCopy,
-            Subpath: m.VolumeOptions.Subpath,
         }
         if m.VolumeOptions.Labels != nil {
             mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels))
@@ -52,7 +52,7 @@ func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBac
         pluginBackend: p,
         imageBackend: i,
         volumeBackend: v,
-        dependencies: agent.NewDependencyManager(convert.SwarmPluginGetter(b.PluginGetter())),
+        dependencies: agent.NewDependencyManager(b.PluginGetter()),
     }
 }
 
@@ -10,12 +10,10 @@ import (
 
     "github.com/containerd/log"
     types "github.com/docker/docker/api/types/swarm"
-    "github.com/docker/docker/daemon/cluster/convert"
     "github.com/docker/docker/daemon/cluster/executor/container"
     lncluster "github.com/docker/docker/libnetwork/cluster"
-    "github.com/docker/docker/libnetwork/cnmallocator"
     swarmapi "github.com/moby/swarmkit/v2/api"
-    "github.com/moby/swarmkit/v2/manager/allocator/networkallocator"
+    swarmallocator "github.com/moby/swarmkit/v2/manager/allocator/cnmallocator"
     swarmnode "github.com/moby/swarmkit/v2/node"
     "github.com/pkg/errors"
     "google.golang.org/grpc"
@@ -125,7 +123,7 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
         ListenControlAPI: control,
         ListenRemoteAPI: conf.ListenAddr,
         AdvertiseRemoteAPI: conf.AdvertiseAddr,
-        NetworkConfig: &networkallocator.Config{
+        NetworkConfig: &swarmallocator.NetworkConfig{
             DefaultAddrPool: conf.DefaultAddressPool,
             SubnetSize: conf.SubnetSize,
             VXLANUDPPort: conf.DataPathPort,
@@ -146,8 +144,7 @@ func (n *nodeRunner) start(conf nodeStartConfig) error {
         ElectionTick: n.cluster.config.RaftElectionTick,
         UnlockKey: conf.lockKey,
         AutoLockManagers: conf.autolock,
-        PluginGetter: convert.SwarmPluginGetter(n.cluster.config.Backend.PluginGetter()),
-        NetworkProvider: cnmallocator.NewProvider(n.cluster.config.Backend.PluginGetter()),
+        PluginGetter: n.cluster.config.Backend.PluginGetter(),
     }
     if conf.availability != "" {
         avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))]
@@ -56,9 +56,9 @@ const (
     DefaultPluginNamespace = "plugins.moby"
     // defaultMinAPIVersion is the minimum API version supported by the API.
     // This version can be overridden through the "DOCKER_MIN_API_VERSION"
-    // environment variable. It currently defaults to the minimum API version
-    // supported by the API server.
-    defaultMinAPIVersion = api.MinSupportedAPIVersion
+    // environment variable. The minimum allowed version is determined
+    // by [minAPIVersion].
+    defaultMinAPIVersion = "1.24"
     // SeccompProfileDefault is the built-in default seccomp profile.
     SeccompProfileDefault = "builtin"
     // SeccompProfileUnconfined is a special profile name for seccomp to use an
@@ -610,8 +610,8 @@ func ValidateMinAPIVersion(ver string) error {
     if strings.EqualFold(ver[0:1], "v") {
         return errors.New(`API version must be provided without "v" prefix`)
     }
-    if versions.LessThan(ver, defaultMinAPIVersion) {
-        return errors.Errorf(`minimum supported API version is %s: %s`, defaultMinAPIVersion, ver)
+    if versions.LessThan(ver, minAPIVersion) {
+        return errors.Errorf(`minimum supported API version is %s: %s`, minAPIVersion, ver)
     }
     if versions.GreaterThan(ver, api.DefaultVersion) {
         return errors.Errorf(`maximum supported API version is %s: %s`, api.DefaultVersion, ver)
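Both hunks above bound a user-supplied minimum API version between a platform floor and the current API version, using the comparison helpers from github.com/docker/docker/api/types/versions. A small usage sketch with assumed bounds (the real values come from the daemon config and api packages, not from this example):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

// Assumed bounds for this sketch only.
const (
	assumedMinAPIVersion = "1.24"
	assumedMaxAPIVersion = "1.44"
)

// validate rejects versions outside the [min, max] range, mirroring the
// shape of the checks in ValidateMinAPIVersion above.
func validate(ver string) error {
	if versions.LessThan(ver, assumedMinAPIVersion) {
		return fmt.Errorf("minimum supported API version is %s: %s", assumedMinAPIVersion, ver)
	}
	if versions.GreaterThan(ver, assumedMaxAPIVersion) {
		return fmt.Errorf("maximum supported API version is %s: %s", assumedMaxAPIVersion, ver)
	}
	return nil
}

func main() {
	fmt.Println(validate("1.30")) // <nil>
	fmt.Println(validate("1.12")) // minimum supported API version is 1.24: 1.12
}
```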
@@ -33,6 +33,9 @@ const (
     // OCI runtime being shipped with the docker daemon package.
     StockRuntimeName = "runc"
 
+    // minAPIVersion represents Minimum REST API version supported
+    minAPIVersion = "1.12"
+
     // userlandProxyBinary is the name of the userland-proxy binary.
     // In rootless-mode, [rootless.RootlessKitDockerProxyBinary] is used instead.
     userlandProxyBinary = "docker-proxy"
@@ -80,6 +83,7 @@ type Config struct {
     Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"`
     CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"`
     CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"`
+    OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` // Deprecated: configure the daemon's oom-score-adjust using a process manager instead.
     Init bool `json:"init,omitempty"`
     InitPath string `json:"init-path,omitempty"`
     SeccompProfile string `json:"seccomp-profile,omitempty"`
@@ -174,6 +178,10 @@ func verifyDefaultCgroupNsMode(mode string) error {
 
 // ValidatePlatformConfig checks if any platform-specific configuration settings are invalid.
 func (conf *Config) ValidatePlatformConfig() error {
+    if conf.OOMScoreAdjust != 0 {
+        return errors.New(`DEPRECATED: The "oom-score-adjust" config parameter and the dockerd "--oom-score-adjust" options have been removed.`)
+    }
+
     if conf.EnableUserlandProxy {
         if conf.UserlandProxyPath == "" {
             return errors.New("invalid userland-proxy-path: userland-proxy is enabled, but userland-proxy-path is not set")
@@ -196,10 +204,6 @@ func (conf *Config) ValidatePlatformConfig() error {
         return errors.Wrap(err, "invalid fixed-cidr-v6")
     }
 
-    if _, ok := conf.Features["windows-dns-proxy"]; ok {
-        return errors.New("feature option 'windows-dns-proxy' is only available on Windows")
-    }
-
     return verifyDefaultCgroupNsMode(conf.CgroupNamespaceMode)
 }
 
@@ -13,6 +13,13 @@ const (
     // default value. On Windows keep this empty so the value is auto-detected
     // based on other options.
     StockRuntimeName = ""
+
+    // minAPIVersion represents Minimum REST API version supported
+    // Technically the first daemon API version released on Windows is v1.25 in
+    // engine version 1.13. However, some clients are explicitly using downlevel
+    // APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
+    // Hence also allowing 1.24 on Windows.
+    minAPIVersion string = "1.24"
 )
 
 // BridgeConfig is meant to store all the parameters for both the bridge driver and the default bridge network. On
@@ -364,6 +364,6 @@ func translateWorkingDir(config *containertypes.Config) error {
     if !system.IsAbs(wd) {
         return fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir)
     }
-    config.WorkingDir = filepath.Clean(wd)
+    config.WorkingDir = wd
     return nil
 }
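The minus side of the hunk above runs the working directory through filepath.Clean before storing it, while the plus side stores the value as-is. A tiny illustration of what Clean does, using only the standard library (output shown for Unix-style path handling):

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Clean collapses redundant separators and resolves "." and ".." elements.
	fmt.Println(filepath.Clean("/workdir/../srv//app/.")) // "/srv/app" on Unix-like systems
}
```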
@@ -54,8 +54,7 @@ func (daemon *Daemon) buildSandboxOptions(cfg *config.Config, container *contain
         sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey())
     }
 
-    // Add platform-specific Sandbox options.
-    if err := buildSandboxPlatformOptions(container, cfg, &sboxOptions); err != nil {
+    if err := setupPathsAndSandboxOptions(container, cfg, &sboxOptions); err != nil {
         return nil, err
     }
 
@@ -421,6 +420,9 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
     }
 
     networkName := mode.NetworkName()
+    if mode.IsDefault() {
+        networkName = daemon.netController.Config().DefaultNetwork
+    }
 
     if mode.IsUserDefined() {
         var err error
@@ -459,6 +461,15 @@ func (daemon *Daemon) updateContainerNetworkSettings(container *container.Contai
         }
     }
 
+    // Convert any settings added by client in default name to
+    // engine's default network name key
+    if mode.IsDefault() {
+        if nConf, ok := container.NetworkSettings.Networks[mode.NetworkName()]; ok {
+            container.NetworkSettings.Networks[networkName] = nConf
+            delete(container.NetworkSettings.Networks, mode.NetworkName())
+        }
+    }
+
     if !mode.IsUserDefined() {
         return
     }
@@ -99,45 +99,6 @@ func (daemon *Daemon) getPIDContainer(id string) (*container.Container, error) {
     return ctr, nil
 }
 
-// setupContainerDirs sets up base container directories (root, ipc, tmpfs and secrets).
-func (daemon *Daemon) setupContainerDirs(c *container.Container) (_ []container.Mount, err error) {
-    if err := daemon.setupContainerMountsRoot(c); err != nil {
-        return nil, err
-    }
-
-    if err := daemon.setupIPCDirs(c); err != nil {
-        return nil, err
-    }
-
-    if err := daemon.setupSecretDir(c); err != nil {
-        return nil, err
-    }
-    defer func() {
-        if err != nil {
-            daemon.cleanupSecretDir(c)
-        }
-    }()
-
-    var ms []container.Mount
-    if !c.HostConfig.IpcMode.IsPrivate() && !c.HostConfig.IpcMode.IsEmpty() {
-        ms = append(ms, c.IpcMounts()...)
-    }
-
-    tmpfsMounts, err := c.TmpfsMounts()
-    if err != nil {
-        return nil, err
-    }
-    ms = append(ms, tmpfsMounts...)
-
-    secretMounts, err := c.SecretMounts()
-    if err != nil {
-        return nil, err
-    }
-    ms = append(ms, secretMounts...)
-
-    return ms, nil
-}
-
 func (daemon *Daemon) setupIPCDirs(c *container.Container) error {
     ipcMode := c.HostConfig.IpcMode
 
@@ -417,7 +378,7 @@ func serviceDiscoveryOnDefaultNetwork() bool {
     return false
 }
 
-func buildSandboxPlatformOptions(container *container.Container, cfg *config.Config, sboxOptions *[]libnetwork.SandboxOption) error {
+func setupPathsAndSandboxOptions(container *container.Container, cfg *config.Config, sboxOptions *[]libnetwork.SandboxOption) error {
     var err error
     var originResolvConfPath string
 
@@ -481,7 +442,6 @@ func buildSandboxPlatformOptions(container *container.Container, cfg *config.Con
         return err
     }
     *sboxOptions = append(*sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath))
-
     return nil
 }
 
@@ -163,13 +163,7 @@ func serviceDiscoveryOnDefaultNetwork() bool {
     return true
 }
 
-func buildSandboxPlatformOptions(container *container.Container, cfg *config.Config, sboxOptions *[]libnetwork.SandboxOption) error {
-    // By default, the Windows internal resolver does not forward requests to
-    // external resolvers - but forwarding can be enabled using feature flag
-    // "windows-dns-proxy":true.
-    if doproxy, exists := cfg.Features["windows-dns-proxy"]; !exists || !doproxy {
-        *sboxOptions = append(*sboxOptions, libnetwork.OptionDNSNoProxy())
-    }
+func setupPathsAndSandboxOptions(container *container.Container, cfg *config.Config, sboxOptions *[]libnetwork.SandboxOption) error {
     return nil
 }
 
@@ -2,226 +2,211 @@ package containerd
 
 import (
     "context"
-    "encoding/json"
-    "errors"
-    "fmt"
+    "reflect"
+    "strings"
 
-    "github.com/containerd/containerd/content"
-    cerrdefs "github.com/containerd/containerd/errdefs"
     "github.com/containerd/log"
-    "github.com/docker/docker/api/types/backend"
     "github.com/docker/docker/api/types/container"
+    imagetype "github.com/docker/docker/api/types/image"
     "github.com/docker/docker/builder"
     "github.com/docker/docker/errdefs"
     "github.com/docker/docker/image"
     "github.com/docker/docker/image/cache"
-    "github.com/docker/docker/internal/multierror"
-    "github.com/docker/docker/layer"
     "github.com/opencontainers/go-digest"
     ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 // MakeImageCache creates a stateful image cache.
-func (i *ImageService) MakeImageCache(ctx context.Context, sourceRefs []string) (builder.ImageCache, error) {
-    return cache.New(ctx, cacheAdaptor{i}, sourceRefs)
-}
-
-type cacheAdaptor struct {
-    is *ImageService
-}
-
-func (c cacheAdaptor) Get(id image.ID) (*image.Image, error) {
-    ctx := context.TODO()
-    ref := id.String()
-
-    outImg, err := c.is.GetImage(ctx, id.String(), backend.GetImageOpts{})
-    if err != nil {
-        return nil, fmt.Errorf("GetImage: %w", err)
-    }
-
-    c8dImg, err := c.is.resolveImage(ctx, ref)
-    if err != nil {
-        return nil, fmt.Errorf("resolveImage: %w", err)
-    }
-
-    var errFound = errors.New("success")
-    err = c.is.walkImageManifests(ctx, c8dImg, func(img *ImageManifest) error {
-        desc, err := img.Config(ctx)
-        if err != nil {
-            log.G(ctx).WithFields(log.Fields{
-                "image": img,
-                "error": err,
-            }).Warn("failed to get config descriptor for image")
-            return nil
-        }
-
-        info, err := c.is.content.Info(ctx, desc.Digest)
-        if err != nil {
-            if !cerrdefs.IsNotFound(err) {
-                log.G(ctx).WithFields(log.Fields{
-                    "image": img,
-                    "desc":  desc,
-                    "error": err,
-                }).Warn("failed to get info of image config")
-            }
-            return nil
-        }
-
-        if dgstStr, ok := info.Labels[contentLabelGcRefContainerConfig]; ok {
-            dgst, err := digest.Parse(dgstStr)
-            if err != nil {
-                log.G(ctx).WithFields(log.Fields{
-                    "label":   contentLabelClassicBuilderImage,
-                    "value":   dgstStr,
-                    "content": desc.Digest,
-                    "error":   err,
-                }).Warn("invalid digest in label")
-                return nil
-            }
-
-            configDesc := ocispec.Descriptor{
-                Digest: dgst,
-            }
-
-            var config container.Config
-            if err := readConfig(ctx, c.is.content, configDesc, &config); err != nil {
-                if !errdefs.IsNotFound(err) {
-                    log.G(ctx).WithFields(log.Fields{
-                        "configDigest": dgst,
-                        "error":        err,
-                    }).Warn("failed to read container config")
-                }
-                return nil
-            }
-
-            outImg.ContainerConfig = config
-
-            // We already have the config we looked for, so return an error to
-            // stop walking the image further. This error will be ignored.
-            return errFound
-        }
-        return nil
-    })
-    if err != nil && err != errFound {
-        return nil, err
-    }
-
-    return outImg, nil
-}
-
-func (c cacheAdaptor) GetByRef(ctx context.Context, refOrId string) (*image.Image, error) {
-    return c.is.GetImage(ctx, refOrId, backend.GetImageOpts{})
-}
-
-func (c cacheAdaptor) SetParent(target, parent image.ID) error {
-    ctx := context.TODO()
-    _, imgs, err := c.is.resolveAllReferences(ctx, target.String())
-    if err != nil {
-        return fmt.Errorf("failed to list images with digest %q", target)
-    }
-
-    var errs []error
-    is := c.is.images
-    for _, img := range imgs {
-        if img.Labels == nil {
-            img.Labels = make(map[string]string)
-        }
-        img.Labels[imageLabelClassicBuilderParent] = parent.String()
-        if _, err := is.Update(ctx, img, "labels."+imageLabelClassicBuilderParent); err != nil {
-            errs = append(errs, fmt.Errorf("failed to update parent label on image %v: %w", img, err))
-        }
-    }
-
-    return multierror.Join(errs...)
-}
-
-func (c cacheAdaptor) GetParent(target image.ID) (image.ID, error) {
-    ctx := context.TODO()
-    value, err := c.is.getImageLabelByDigest(ctx, target.Digest(), imageLabelClassicBuilderParent)
-    if err != nil {
-        return "", fmt.Errorf("failed to read parent image: %w", err)
-    }
-
-    dgst, err := digest.Parse(value)
-    if err != nil {
-        return "", fmt.Errorf("invalid parent value: %q", value)
-    }
-
-    return image.ID(dgst), nil
-}
-
-func (c cacheAdaptor) Create(parent *image.Image, target image.Image, extraLayer layer.DiffID) (image.ID, error) {
-    ctx := context.TODO()
-    data, err := json.Marshal(target)
-    if err != nil {
-        return "", fmt.Errorf("failed to marshal image config: %w", err)
-    }
-
-    var layerDigest digest.Digest
-    if extraLayer != "" {
-        info, err := findContentByUncompressedDigest(ctx, c.is.client.ContentStore(), digest.Digest(extraLayer))
-        if err != nil {
-            return "", fmt.Errorf("failed to find content for diff ID %q: %w", extraLayer, err)
-        }
-        layerDigest = info.Digest
-    }
-
-    var parentRef string
-    if parent != nil {
-        parentRef = parent.ID().String()
-    }
-    img, err := c.is.CreateImage(ctx, data, parentRef, layerDigest)
-    if err != nil {
-        return "", fmt.Errorf("failed to created cached image: %w", err)
-    }
-
-    return image.ID(img.ImageID()), nil
-}
-
-func (c cacheAdaptor) IsBuiltLocally(target image.ID) (bool, error) {
-    ctx := context.TODO()
-    value, err := c.is.getImageLabelByDigest(ctx, target.Digest(), imageLabelClassicBuilderContainerConfig)
-    if err != nil {
-        return false, fmt.Errorf("failed to read container config label: %w", err)
-    }
-    return value != "", nil
-}
-
-func (c cacheAdaptor) Children(id image.ID) []image.ID {
-    ctx := context.TODO()
-
-    if id.String() == "" {
-        imgs, err := c.is.getImagesWithLabel(ctx, imageLabelClassicBuilderFromScratch, "1")
-        if err != nil {
-            log.G(ctx).WithError(err).Error("failed to get from scratch images")
-            return nil
-        }
-        return imgs
-    }
-
-    imgs, err := c.is.Children(ctx, id)
-    if err != nil {
-        log.G(ctx).WithError(err).Error("failed to get image children")
-        return nil
-    }
-
-    return imgs
-}
-
-func findContentByUncompressedDigest(ctx context.Context, cs content.Manager, uncompressed digest.Digest) (content.Info, error) {
-    var out content.Info
-
-    errStopWalk := errors.New("success")
-    err := cs.Walk(ctx, func(i content.Info) error {
-        out = i
-        return errStopWalk
-    }, `labels."containerd.io/uncompressed"==`+uncompressed.String())
-
-    if err != nil && err != errStopWalk {
-        return out, err
-    }
-    if out.Digest == "" {
-        return out, errdefs.NotFound(errors.New("no content matches this uncompressed digest"))
-    }
-    return out, nil
-}
+func (i *ImageService) MakeImageCache(ctx context.Context, cacheFrom []string) (builder.ImageCache, error) {
+    images := []*image.Image{}
+    if len(cacheFrom) == 0 {
+        return &localCache{
+            imageService: i,
+        }, nil
+    }
+
+    for _, c := range cacheFrom {
+        h, err := i.ImageHistory(ctx, c)
+        if err != nil {
+            continue
+        }
+        for _, hi := range h {
+            if hi.ID != "<missing>" {
+                im, err := i.GetImage(ctx, hi.ID, imagetype.GetImageOpts{})
+                if err != nil {
+                    return nil, err
+                }
+                images = append(images, im)
+            }
+        }
+    }
+
+    return &imageCache{
+        lc: &localCache{
+            imageService: i,
+        },
+        images:       images,
+        imageService: i,
+    }, nil
+}
+
+type localCache struct {
+    imageService *ImageService
+}
+
+func (ic *localCache) GetCache(parentID string, cfg *container.Config, platform ocispec.Platform) (imageID string, err error) {
+    ctx := context.TODO()
+
+    var children []image.ID
+
+    // FROM scratch
+    if parentID == "" {
+        c, err := ic.imageService.getImagesWithLabel(ctx, imageLabelClassicBuilderFromScratch, "1")
+        if err != nil {
+            return "", err
+        }
+        children = c
+    } else {
+        c, err := ic.imageService.Children(ctx, image.ID(parentID))
+        if err != nil {
+            return "", err
+        }
+        children = c
+    }
+
+    var match *image.Image
+    for _, child := range children {
+        ccDigestStr, err := ic.imageService.getImageLabelByDigest(ctx, child.Digest(), imageLabelClassicBuilderContainerConfig)
+        if err != nil {
+            return "", err
+        }
+        if ccDigestStr == "" {
+            continue
+        }
+
+        dgst, err := digest.Parse(ccDigestStr)
+        if err != nil {
+            log.G(ctx).WithError(err).Warnf("invalid container config digest: %q", ccDigestStr)
+            continue
+        }
+
+        var cc container.Config
+        if err := readConfig(ctx, ic.imageService.content, ocispec.Descriptor{Digest: dgst}, &cc); err != nil {
+            if errdefs.IsNotFound(err) {
+                log.G(ctx).WithError(err).WithField("image", child).Warnf("missing container config: %q", ccDigestStr)
+                continue
+            }
+            return "", err
+        }
+
+        if cache.CompareConfig(&cc, cfg) {
+            childImage, err := ic.imageService.GetImage(ctx, child.String(), imagetype.GetImageOpts{Platform: &platform})
+            if err != nil {
+                if errdefs.IsNotFound(err) {
+                    continue
+                }
+                return "", err
+            }
+
+            if childImage.Created != nil && (match == nil || match.Created.Before(*childImage.Created)) {
+                match = childImage
+            }
+        }
+    }
+
+    if match == nil {
+        return "", nil
+    }
+
+    return match.ID().String(), nil
+}
+
+type imageCache struct {
+    images       []*image.Image
+    imageService *ImageService
+    lc           *localCache
+}
+
+func (ic *imageCache) GetCache(parentID string, cfg *container.Config, platform ocispec.Platform) (imageID string, err error) {
+    ctx := context.TODO()
+
+    imgID, err := ic.lc.GetCache(parentID, cfg, platform)
+    if err != nil {
+        return "", err
+    }
+    if imgID != "" {
+        for _, s := range ic.images {
+            if ic.isParent(ctx, s, image.ID(imgID)) {
+                return imgID, nil
+            }
+        }
+    }
+
+    var parent *image.Image
+    lenHistory := 0
+
+    if parentID != "" {
+        parent, err = ic.imageService.GetImage(ctx, parentID, imagetype.GetImageOpts{Platform: &platform})
+        if err != nil {
+            return "", err
+        }
+        lenHistory = len(parent.History)
+    }
+    for _, target := range ic.images {
+        if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) {
+            continue
+        }
+
+        return target.ID().String(), nil
+    }
+
+    return "", nil
+}
+
+func isValidConfig(cfg *container.Config, h image.History) bool {
+    // todo: make this format better than join that loses data
+    return strings.Join(cfg.Cmd, " ") == h.CreatedBy
+}
+
+func isValidParent(img, parent *image.Image) bool {
+    if len(img.History) == 0 {
+        return false
+    }
+    if parent == nil || len(parent.History) == 0 && len(parent.RootFS.DiffIDs) == 0 {
+        return true
+    }
+    if len(parent.History) >= len(img.History) {
+        return false
+    }
+    if len(parent.RootFS.DiffIDs) > len(img.RootFS.DiffIDs) {
+        return false
+    }
+
+    for i, h := range parent.History {
+        if !reflect.DeepEqual(h, img.History[i]) {
+            return false
+        }
+    }
+    for i, d := range parent.RootFS.DiffIDs {
+        if d != img.RootFS.DiffIDs[i] {
+            return false
+        }
+    }
+    return true
+}
+
+func (ic *imageCache) isParent(ctx context.Context, img *image.Image, parentID image.ID) bool {
+    ii, err := ic.imageService.resolveImage(ctx, img.ImageID())
+    if err != nil {
+        return false
+    }
+    parent, ok := ii.Labels[imageLabelClassicBuilderParent]
+    if ok {
+        return parent == parentID.String()
+    }
+
+    p, err := ic.imageService.GetImage(ctx, parentID.String(), imagetype.GetImageOpts{})
+    if err != nil {
+        return false
+    }
+    return ic.isParent(ctx, p, parentID)
+}
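The isValidParent helper added in the hunk above accepts a cached candidate only when the parent's history entries and layer diff IDs form a strict prefix of the candidate image's own. A self-contained sketch of that rule using stand-in types (not the moby image package):

```go
package main

import "fmt"

type imageMeta struct {
	History []string // created-by entries, stand-in for image.History
	DiffIDs []string // layer digests, stand-in for RootFS.DiffIDs
}

// isPrefixParent reports whether parent's history and layers are a strict
// prefix of img's, mirroring the comparisons in isValidParent above.
func isPrefixParent(img, parent imageMeta) bool {
	if len(parent.History) >= len(img.History) || len(parent.DiffIDs) > len(img.DiffIDs) {
		return false
	}
	for i, h := range parent.History {
		if h != img.History[i] {
			return false
		}
	}
	for i, d := range parent.DiffIDs {
		if d != img.DiffIDs[i] {
			return false
		}
	}
	return true
}

func main() {
	parent := imageMeta{History: []string{"FROM alpine"}, DiffIDs: []string{"sha256:aaa"}}
	child := imageMeta{History: []string{"FROM alpine", "RUN apk add curl"}, DiffIDs: []string{"sha256:aaa", "sha256:bbb"}}
	fmt.Println(isPrefixParent(child, parent)) // true
	fmt.Println(isPrefixParent(parent, child)) // false
}
```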
Some files were not shown because too many files have changed in this diff.