Compare commits
74 commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
659604f9ee | ||
![]() |
6660133ffb | ||
![]() |
67b3563d09 | ||
![]() |
7a4ea19803 | ||
![]() |
ae6e9333c0 | ||
![]() |
0d9acd24fe | ||
![]() |
37bc639704 | ||
![]() |
04eccf8165 | ||
![]() |
24722779ff | ||
![]() |
9d8acb7bd1 | ||
![]() |
4b78458e4b | ||
![]() |
d64bab35ee | ||
![]() |
329d671aef | ||
![]() |
4cc2081119 | ||
![]() |
27df42255c | ||
![]() |
9ee7d30aef | ||
![]() |
8a4b7c5af8 | ||
![]() |
7d50989467 | ||
![]() |
a753ca64e2 | ||
![]() |
ac1c329245 | ||
![]() |
5276c2b6e0 | ||
![]() |
1b0d37bdc2 | ||
![]() |
baf1fd1c3f | ||
![]() |
992dc33fc5 | ||
![]() |
ef1545ed4a | ||
![]() |
876f5eda51 | ||
![]() |
463850e59e | ||
![]() |
47a3dad256 | ||
![]() |
a0bc3ebae4 | ||
![]() |
922b6aa672 | ||
![]() |
0e605cf972 | ||
![]() |
878c41791b | ||
![]() |
654e80abc2 | ||
![]() |
0869b089e4 | ||
![]() |
3467ba6451 | ||
![]() |
f9b886c01b | ||
![]() |
07140c0eca | ||
![]() |
d5ad186d49 | ||
![]() |
4d924c35f7 | ||
![]() |
ea662c5c8a | ||
![]() |
68b7ba0d03 | ||
![]() |
821e4ec4c7 | ||
![]() |
5ea7b8d091 | ||
![]() |
1331b8c39a | ||
![]() |
907f037141 | ||
![]() |
a5b597ea51 | ||
![]() |
8bbfa32741 | ||
![]() |
807e415260 | ||
![]() |
8587a1c617 | ||
![]() |
9717369913 | ||
![]() |
ed0c147c8f | ||
![]() |
90be9ab802 | ||
![]() |
d73f7031e0 | ||
![]() |
ea7f7f168e | ||
![]() |
233c49438b | ||
![]() |
2b7424512a | ||
![]() |
f77a3274b4 | ||
![]() |
c76bb6a3a3 | ||
![]() |
71846e82c1 | ||
![]() |
ecbc27aa22 | ||
![]() |
c01f02cfcb | ||
![]() |
ce79cd19f6 | ||
![]() |
1235338836 | ||
![]() |
763d2b7996 | ||
![]() |
e9eff01dca | ||
![]() |
69ef9a7f90 | ||
![]() |
86770904be | ||
![]() |
31b98f9502 | ||
![]() |
bfffb0974e | ||
![]() |
e28bc0d271 | ||
![]() |
d169a57306 | ||
![]() |
63640838ba | ||
![]() |
269e55a915 | ||
![]() |
012dd239ce |
104 changed files with 2097 additions and 602 deletions
110
.github/workflows/bin-image.yml
vendored
Normal file
110
.github/workflows/bin-image.yml
vendored
Normal file
|
@ -0,0 +1,110 @@
|
|||
name: bin-image
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- 'master'
|
||||
- '[0-9]+.[0-9]+'
|
||||
tags:
|
||||
- 'v*'
|
||||
pull_request:
|
||||
|
||||
env:
|
||||
PLATFORM: Moby Engine
|
||||
PRODUCT: Moby
|
||||
DEFAULT_PRODUCT_LICENSE: Moby
|
||||
PACKAGER_NAME: Moby
|
||||
|
||||
jobs:
|
||||
validate-dco:
|
||||
uses: ./.github/workflows/.dco.yml
|
||||
|
||||
prepare:
|
||||
runs-on: ubuntu-20.04
|
||||
outputs:
|
||||
platforms: ${{ steps.platforms.outputs.matrix }}
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
-
|
||||
name: Create platforms matrix
|
||||
id: platforms
|
||||
run: |
|
||||
echo "matrix=$(docker buildx bake bin-image-cross --print | jq -cr '.target."bin-image-cross".platforms')" >>${GITHUB_OUTPUT}
|
||||
-
|
||||
name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v4
|
||||
with:
|
||||
images: moby-bin
|
||||
### versioning strategy
|
||||
## push semver tag v23.0.0
|
||||
# moby/moby-bin:23.0.0
|
||||
# moby/moby-bin:latest
|
||||
## push semver prelease tag v23.0.0-beta.1
|
||||
# moby/moby-bin:23.0.0-beta.1
|
||||
## push on master
|
||||
# moby/moby-bin:master
|
||||
## push on 23.0 branch
|
||||
# moby/moby-bin:23.0
|
||||
tags: |
|
||||
type=semver,pattern={{version}}
|
||||
type=ref,event=branch
|
||||
type=ref,event=pr
|
||||
-
|
||||
name: Rename meta bake definition file
|
||||
run: |
|
||||
mv "${{ steps.meta.outputs.bake-file }}" "/tmp/bake-meta.json"
|
||||
-
|
||||
name: Upload meta bake definition
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: bake-meta
|
||||
path: /tmp/bake-meta.json
|
||||
if-no-files-found: error
|
||||
retention-days: 1
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-20.04
|
||||
needs:
|
||||
- validate-dco
|
||||
- prepare
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
platform: ${{ fromJson(needs.prepare.outputs.platforms) }}
|
||||
steps:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
-
|
||||
name: Download meta bake definition
|
||||
uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: bake-meta
|
||||
path: /tmp
|
||||
-
|
||||
name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v2
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
-
|
||||
name: Build
|
||||
uses: docker/bake-action@v2
|
||||
with:
|
||||
files: |
|
||||
./docker-bake.hcl
|
||||
/tmp/bake-meta.json
|
||||
targets: bin-image
|
||||
set: |
|
||||
*.platform=${{ matrix.platform }}
|
||||
*.output=type=cacheonly
|
|
@ -192,7 +192,7 @@ RUN git init . && git remote add origin "https://github.com/containerd/container
|
|||
# When updating the binary version you may also need to update the vendor
|
||||
# version to pick up bug fixes or new APIs, however, usually the Go packages
|
||||
# are built from a commit from the master branch.
|
||||
ARG CONTAINERD_VERSION=v1.7.0
|
||||
ARG CONTAINERD_VERSION=v1.7.1
|
||||
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD
|
||||
|
||||
FROM base AS containerd-build
|
||||
|
@ -447,7 +447,12 @@ COPY --from=tomll /build/ /usr/local/bin/
|
|||
COPY --from=gowinres /build/ /usr/local/bin/
|
||||
COPY --from=tini /build/ /usr/local/bin/
|
||||
COPY --from=registry /build/ /usr/local/bin/
|
||||
COPY --from=criu /build/ /usr/local/bin/
|
||||
|
||||
# Skip the CRIU stage for now, as the opensuse package repository is sometimes
|
||||
# unstable, and we're currently not using it in CI.
|
||||
#
|
||||
# FIXME(thaJeztah): re-enable this stage when https://github.com/moby/moby/issues/38963 is resolved (see https://github.com/moby/moby/pull/38984)
|
||||
# COPY --from=criu /build/ /usr/local/bin/
|
||||
COPY --from=gotestsum /build/ /usr/local/bin/
|
||||
COPY --from=golangci_lint /build/ /usr/local/bin/
|
||||
COPY --from=shfmt /build/ /usr/local/bin/
|
||||
|
|
|
@ -71,8 +71,8 @@ RUN apk --no-cache add \
|
|||
tar \
|
||||
xz
|
||||
|
||||
COPY hack/test/e2e-run.sh /scripts/run.sh
|
||||
COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh
|
||||
COPY hack/test/e2e-run.sh /scripts/run.sh
|
||||
COPY hack/make/.build-empty-images /scripts/build-empty-images.sh
|
||||
|
||||
COPY integration/testdata /tests/integration/testdata
|
||||
COPY integration/build/testdata /tests/integration/build/testdata
|
||||
|
|
|
@ -168,7 +168,7 @@ SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPref
|
|||
ARG GO_VERSION=1.20.4
|
||||
ARG GOTESTSUM_VERSION=v1.8.2
|
||||
ARG GOWINRES_VERSION=v0.3.0
|
||||
ARG CONTAINERD_VERSION=v1.7.0
|
||||
ARG CONTAINERD_VERSION=v1.7.1
|
||||
|
||||
# Environment variable notes:
|
||||
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
|
||||
|
|
|
@ -44,7 +44,7 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter,
|
|||
}
|
||||
|
||||
config, _, _, err := s.decoder.DecodeConfig(r.Body)
|
||||
if err != nil && err != io.EOF { // Do not fail if body is empty.
|
||||
if err != nil && !errors.Is(err, io.EOF) { // Do not fail if body is empty.
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -486,6 +486,9 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo
|
|||
|
||||
config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body)
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
return errdefs.InvalidParameter(errors.New("invalid JSON: got EOF while reading request body"))
|
||||
}
|
||||
return err
|
||||
}
|
||||
version := httputils.VersionFromContext(ctx)
|
||||
|
|
|
@ -282,6 +282,14 @@ func (ir *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, er
|
|||
comment = img.History[len(img.History)-1].Comment
|
||||
}
|
||||
|
||||
// Make sure we output empty arrays instead of nil.
|
||||
if repoTags == nil {
|
||||
repoTags = []string{}
|
||||
}
|
||||
if repoDigests == nil {
|
||||
repoDigests = []string{}
|
||||
}
|
||||
|
||||
return &types.ImageInspect{
|
||||
ID: img.ID().String(),
|
||||
RepoTags: repoTags,
|
||||
|
|
|
@ -78,6 +78,7 @@ var cacheFields = map[string]bool{
|
|||
type Opt struct {
|
||||
SessionManager *session.Manager
|
||||
Root string
|
||||
EngineID string
|
||||
Dist images.DistributionServices
|
||||
ImageTagger mobyexporter.ImageTagger
|
||||
NetworkController *libnetwork.Controller
|
||||
|
|
|
@ -16,7 +16,6 @@ import (
|
|||
"github.com/docker/docker/builder/builder-next/adapters/containerimage"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/localinlinecache"
|
||||
"github.com/docker/docker/builder/builder-next/adapters/snapshot"
|
||||
"github.com/docker/docker/builder/builder-next/exporter"
|
||||
"github.com/docker/docker/builder/builder-next/exporter/mobyexporter"
|
||||
"github.com/docker/docker/builder/builder-next/imagerefchecker"
|
||||
mobyworker "github.com/docker/docker/builder/builder-next/worker"
|
||||
|
@ -312,7 +311,7 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
|
|||
}
|
||||
|
||||
wopt := mobyworker.Opt{
|
||||
ID: exporter.Moby,
|
||||
ID: opt.EngineID,
|
||||
ContentStore: store,
|
||||
CacheManager: cm,
|
||||
GCPolicy: gcPolicy,
|
||||
|
|
|
@ -50,7 +50,7 @@ import (
|
|||
)
|
||||
|
||||
func init() {
|
||||
version.Version = "v0.11.6"
|
||||
version.Version = "v0.11.7-0.20230525183624-798ad6b0ce9f"
|
||||
}
|
||||
|
||||
const labelCreatedAt = "buildkit/createdat"
|
||||
|
|
|
@ -14,6 +14,7 @@ import (
|
|||
containerpkg "github.com/docker/docker/container"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -45,7 +46,7 @@ type Backend interface {
|
|||
// ContainerCreateWorkdir creates the workdir
|
||||
ContainerCreateWorkdir(containerID string) error
|
||||
|
||||
CreateImage(config []byte, parent string) (Image, error)
|
||||
CreateImage(ctx context.Context, config []byte, parent string, contentStoreDigest digest.Digest) (Image, error)
|
||||
|
||||
ImageCacheBuilder
|
||||
}
|
||||
|
@ -104,6 +105,7 @@ type ROLayer interface {
|
|||
Release() error
|
||||
NewRWLayer() (RWLayer, error)
|
||||
DiffID() layer.DiffID
|
||||
ContentStoreDigest() digest.Digest
|
||||
}
|
||||
|
||||
// RWLayer is active layer that can be read/modified
|
||||
|
|
|
@ -21,8 +21,11 @@ type dispatchTestCase struct {
|
|||
files map[string]string
|
||||
}
|
||||
|
||||
func init() {
|
||||
reexec.Init()
|
||||
func TestMain(m *testing.M) {
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func TestDispatch(t *testing.T) {
|
||||
|
|
|
@ -63,7 +63,7 @@ func (b *Builder) commitContainer(ctx context.Context, dispatchState *dispatchSt
|
|||
return err
|
||||
}
|
||||
|
||||
func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error {
|
||||
func (b *Builder) exportImage(ctx context.Context, state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error {
|
||||
newLayer, err := layer.Commit()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -98,7 +98,15 @@ func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, paren
|
|||
return errors.Wrap(err, "failed to encode image config")
|
||||
}
|
||||
|
||||
exportedImage, err := b.docker.CreateImage(config, state.imageID)
|
||||
// when writing the new image's manifest, we now need to pass in the new layer's digest.
|
||||
// before the containerd store work this was unnecessary since we get the layer id
|
||||
// from the image's RootFS ChainID -- see:
|
||||
// https://github.com/moby/moby/blob/8cf66ed7322fa885ef99c4c044fa23e1727301dc/image/store.go#L162
|
||||
// however, with the containerd store we can't do this. An alternative implementation here
|
||||
// without changing the signature would be to get the layer digest by walking the content store
|
||||
// and filtering the objects to find the layer with the DiffID we want, but that has performance
|
||||
// implications that should be called out/investigated
|
||||
exportedImage, err := b.docker.CreateImage(ctx, config, state.imageID, newLayer.ContentStoreDigest())
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to export image")
|
||||
}
|
||||
|
@ -170,7 +178,7 @@ func (b *Builder) performCopy(ctx context.Context, req dispatchRequest, inst cop
|
|||
return errors.Wrapf(err, "failed to copy files")
|
||||
}
|
||||
}
|
||||
return b.exportImage(state, rwLayer, imageMount.Image(), runConfigWithCommentCmd)
|
||||
return b.exportImage(ctx, state, rwLayer, imageMount.Image(), runConfigWithCommentCmd)
|
||||
}
|
||||
|
||||
func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer, platform string) (copyInfo, error) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package dockerfile // import "github.com/docker/docker/builder/dockerfile"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
|
@ -193,6 +194,7 @@ type MockROLayer struct {
|
|||
diffID layer.DiffID
|
||||
}
|
||||
|
||||
func (l *MockROLayer) ContentStoreDigest() digest.Digest { return "" }
|
||||
func (l *MockROLayer) Release() error { return nil }
|
||||
func (l *MockROLayer) NewRWLayer() (builder.RWLayer, error) { return nil, nil }
|
||||
func (l *MockROLayer) DiffID() layer.DiffID { return l.diffID }
|
||||
|
@ -217,6 +219,6 @@ func TestExportImage(t *testing.T) {
|
|||
imageSources: getMockImageSource(nil, nil, nil),
|
||||
docker: getMockBuildBackend(),
|
||||
}
|
||||
err := b.exportImage(ds, layer, parentImage, runConfig)
|
||||
err := b.exportImage(context.TODO(), ds, layer, parentImage, runConfig)
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
|
|
|
@ -13,6 +13,7 @@ import (
|
|||
containerpkg "github.com/docker/docker/container"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// MockBackend implements the builder.Backend interface for unit testing
|
||||
|
@ -80,7 +81,7 @@ func (m *MockBackend) MakeImageCache(ctx context.Context, cacheFrom []string) (b
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *MockBackend) CreateImage(config []byte, parent string) (builder.Image, error) {
|
||||
func (m *MockBackend) CreateImage(ctx context.Context, config []byte, parent string, layerDigest digest.Digest) (builder.Image, error) {
|
||||
return &mockImage{id: "test"}, nil
|
||||
}
|
||||
|
||||
|
@ -119,6 +120,10 @@ func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config) (str
|
|||
|
||||
type mockLayer struct{}
|
||||
|
||||
func (l *mockLayer) ContentStoreDigest() digest.Digest {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (l *mockLayer) Release() error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -17,8 +17,11 @@ const (
|
|||
contents = "contents test"
|
||||
)
|
||||
|
||||
func init() {
|
||||
reexec.Init()
|
||||
func TestMain(m *testing.M) {
|
||||
if reexec.Init() {
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func TestCloseRootDirectory(t *testing.T) {
|
||||
|
|
|
@ -351,6 +351,7 @@ func newRouterOptions(ctx context.Context, config *config.Config, d *daemon.Daem
|
|||
bk, err := buildkit.New(ctx, buildkit.Opt{
|
||||
SessionManager: sm,
|
||||
Root: filepath.Join(config.Root, "buildkit"),
|
||||
EngineID: d.ID(),
|
||||
Dist: d.DistributionServices(),
|
||||
ImageTagger: d.ImageService(),
|
||||
NetworkController: d.NetworkController(),
|
||||
|
|
|
@ -6,13 +6,9 @@ import (
|
|||
"os"
|
||||
"path"
|
||||
"text/template"
|
||||
|
||||
"github.com/docker/docker/pkg/aaparser"
|
||||
)
|
||||
|
||||
type profileData struct {
|
||||
Version int
|
||||
}
|
||||
type profileData struct{}
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 2 {
|
||||
|
@ -22,15 +18,6 @@ func main() {
|
|||
// parse the arg
|
||||
apparmorProfilePath := os.Args[1]
|
||||
|
||||
version, err := aaparser.GetVersion()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
data := profileData{
|
||||
Version: version,
|
||||
}
|
||||
fmt.Printf("apparmor_parser is of version %+v\n", data)
|
||||
|
||||
// parse the template
|
||||
compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate)
|
||||
if err != nil {
|
||||
|
@ -48,6 +35,7 @@ func main() {
|
|||
}
|
||||
defer f.Close()
|
||||
|
||||
data := profileData{}
|
||||
if err := compiled.Execute(f, data); err != nil {
|
||||
log.Fatalf("executing template failed: %v", err)
|
||||
}
|
||||
|
|
|
@ -149,9 +149,7 @@ profile /usr/bin/docker (attach_disconnected, complain) {
|
|||
}
|
||||
# xz works via pipes, so we do not need access to the filesystem.
|
||||
profile /usr/bin/xz (complain) {
|
||||
{{if ge .Version 209000}}
|
||||
signal (receive) peer=/usr/bin/docker,
|
||||
{{end}}
|
||||
/etc/ld.so.cache r,
|
||||
/lib/** rm,
|
||||
/usr/bin/xz rm,
|
||||
|
|
|
@ -2,11 +2,86 @@ package containerd
|
|||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
imagetype "github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/image"
|
||||
)
|
||||
|
||||
// MakeImageCache creates a stateful image cache.
|
||||
func (i *ImageService) MakeImageCache(ctx context.Context, cacheFrom []string) (builder.ImageCache, error) {
|
||||
panic("not implemented")
|
||||
images := []*image.Image{}
|
||||
for _, c := range cacheFrom {
|
||||
im, err := i.GetImage(ctx, c, imagetype.GetImageOpts{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
images = append(images, im)
|
||||
}
|
||||
return &imageCache{images: images, c: i}, nil
|
||||
}
|
||||
|
||||
type imageCache struct {
|
||||
images []*image.Image
|
||||
c *ImageService
|
||||
}
|
||||
|
||||
func (ic *imageCache) GetCache(parentID string, cfg *container.Config) (imageID string, err error) {
|
||||
ctx := context.TODO()
|
||||
parent, err := ic.c.GetImage(ctx, parentID, imagetype.GetImageOpts{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, localCachedImage := range ic.images {
|
||||
if isMatch(localCachedImage, parent, cfg) {
|
||||
return localCachedImage.ID().String(), nil
|
||||
}
|
||||
}
|
||||
|
||||
children, err := ic.c.Children(ctx, parent.ID())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for _, children := range children {
|
||||
childImage, err := ic.c.GetImage(ctx, children.String(), imagetype.GetImageOpts{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if isMatch(childImage, parent, cfg) {
|
||||
return children.String(), nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// isMatch checks whether a given target can be used as cache for the given
|
||||
// parent image/config combination.
|
||||
// A target can only be an immediate child of the given parent image. For
|
||||
// a parent image with `n` history entries, a valid target must have `n+1`
|
||||
// entries and the extra entry must match the provided config
|
||||
func isMatch(target, parent *image.Image, cfg *container.Config) bool {
|
||||
if target == nil || parent == nil || cfg == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(target.History) != len(parent.History)+1 ||
|
||||
len(target.RootFS.DiffIDs) != len(parent.RootFS.DiffIDs)+1 {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := range parent.History {
|
||||
if !reflect.DeepEqual(parent.History[i], target.History[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
childCreatedBy := target.History[len(target.History)-1].CreatedBy
|
||||
return childCreatedBy == strings.Join(cfg.Cmd, " ")
|
||||
}
|
||||
|
|
|
@ -68,11 +68,30 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
|
|||
exposedPorts[nat.Port(k)] = v
|
||||
}
|
||||
|
||||
derefTimeSafely := func(t *time.Time) time.Time {
|
||||
if t != nil {
|
||||
return *t
|
||||
}
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
var imgHistory []image.History
|
||||
for _, h := range ociimage.History {
|
||||
imgHistory = append(imgHistory, image.History{
|
||||
Created: derefTimeSafely(h.Created),
|
||||
Author: h.Author,
|
||||
CreatedBy: h.CreatedBy,
|
||||
Comment: h.Comment,
|
||||
EmptyLayer: h.EmptyLayer,
|
||||
})
|
||||
}
|
||||
|
||||
img := image.NewImage(image.ID(desc.Digest))
|
||||
img.V1Image = image.V1Image{
|
||||
ID: string(desc.Digest),
|
||||
OS: ociimage.OS,
|
||||
Architecture: ociimage.Architecture,
|
||||
Created: derefTimeSafely(ociimage.Created),
|
||||
Config: &containertypes.Config{
|
||||
Entrypoint: ociimage.Config.Entrypoint,
|
||||
Env: ociimage.Config.Env,
|
||||
|
@ -87,6 +106,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
|
|||
}
|
||||
|
||||
img.RootFS = rootfs
|
||||
img.History = imgHistory
|
||||
|
||||
if options.Details {
|
||||
lastUpdated := time.Unix(0, 0)
|
||||
|
|
|
@ -2,23 +2,510 @@ package containerd
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
cerrdefs "github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/leases"
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/rootfs"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
imagetypes "github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
registrypkg "github.com/docker/docker/registry"
|
||||
|
||||
// "github.com/docker/docker/api/types/container"
|
||||
containerdimages "github.com/containerd/containerd/images"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/builder"
|
||||
"github.com/docker/docker/errdefs"
|
||||
dimage "github.com/docker/docker/image"
|
||||
"github.com/docker/docker/layer"
|
||||
"github.com/docker/docker/pkg/progress"
|
||||
"github.com/docker/docker/pkg/streamformatter"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// GetImageAndReleasableLayer returns an image and releaseable layer for a
|
||||
// reference or ID. Every call to GetImageAndReleasableLayer MUST call
|
||||
// releasableLayer.Release() to prevent leaking of layers.
|
||||
func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) {
|
||||
return nil, nil, errdefs.NotImplemented(errors.New("not implemented"))
|
||||
if refOrID == "" { // from SCRATCH
|
||||
os := runtime.GOOS
|
||||
if runtime.GOOS == "windows" {
|
||||
os = "linux"
|
||||
}
|
||||
if opts.Platform != nil {
|
||||
os = opts.Platform.OS
|
||||
}
|
||||
if !system.IsOSSupported(os) {
|
||||
return nil, nil, system.ErrNotSupportedOperatingSystem
|
||||
}
|
||||
return nil, &rolayer{
|
||||
key: "",
|
||||
c: i.client,
|
||||
snapshotter: i.snapshotter,
|
||||
diffID: "",
|
||||
root: "",
|
||||
}, nil
|
||||
}
|
||||
|
||||
if opts.PullOption != backend.PullOptionForcePull {
|
||||
// TODO(laurazard): same as below
|
||||
img, err := i.GetImage(ctx, refOrID, image.GetImageOpts{Platform: opts.Platform})
|
||||
if err != nil && opts.PullOption == backend.PullOptionNoPull {
|
||||
return nil, nil, err
|
||||
}
|
||||
imgDesc, err := i.resolveDescriptor(ctx, refOrID)
|
||||
if err != nil && !errdefs.IsNotFound(err) {
|
||||
return nil, nil, err
|
||||
}
|
||||
if img != nil {
|
||||
if !system.IsOSSupported(img.OperatingSystem()) {
|
||||
return nil, nil, system.ErrNotSupportedOperatingSystem
|
||||
}
|
||||
|
||||
layer, err := newROLayerForImage(ctx, &imgDesc, i, opts, refOrID, opts.Platform)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return img, layer, nil
|
||||
}
|
||||
}
|
||||
|
||||
ctx, _, err := i.client.WithLease(ctx, leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create lease for commit: %w", err)
|
||||
}
|
||||
|
||||
// TODO(laurazard): do we really need a new method here to pull the image?
|
||||
imgDesc, err := i.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.Platform)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// TODO(laurazard): pullForBuilder should return whatever we
|
||||
// need here instead of having to go and get it again
|
||||
img, err := i.GetImage(ctx, refOrID, imagetypes.GetImageOpts{
|
||||
Platform: opts.Platform,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
layer, err := newROLayerForImage(ctx, imgDesc, i, opts, refOrID, opts.Platform)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return img, layer, nil
|
||||
}
|
||||
|
||||
func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]registry.AuthConfig, output io.Writer, platform *ocispec.Platform) (*ocispec.Descriptor, error) {
|
||||
ref, err := reference.ParseNormalizedNamed(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
taggedRef := reference.TagNameOnly(ref)
|
||||
|
||||
pullRegistryAuth := ®istry.AuthConfig{}
|
||||
if len(authConfigs) > 0 {
|
||||
// The request came with a full auth config, use it
|
||||
repoInfo, err := i.registryService.ResolveRepository(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resolvedConfig := registrypkg.ResolveAuthConfig(authConfigs, repoInfo.Index)
|
||||
pullRegistryAuth = &resolvedConfig
|
||||
}
|
||||
|
||||
if err := i.PullImage(ctx, ref.Name(), taggedRef.(reference.NamedTagged).Tag(), platform, nil, pullRegistryAuth, output); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
img, err := i.GetImage(ctx, name, imagetypes.GetImageOpts{Platform: platform})
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) && img != nil && platform != nil {
|
||||
imgPlat := ocispec.Platform{
|
||||
OS: img.OS,
|
||||
Architecture: img.BaseImgArch(),
|
||||
Variant: img.BaseImgVariant(),
|
||||
}
|
||||
|
||||
p := *platform
|
||||
if !platforms.Only(p).Match(imgPlat) {
|
||||
po := streamformatter.NewJSONProgressOutput(output, false)
|
||||
progress.Messagef(po, "", `
|
||||
WARNING: Pulled image with specified platform (%s), but the resulting image's configured platform (%s) does not match.
|
||||
This is most likely caused by a bug in the build system that created the fetched image (%s).
|
||||
Please notify the image author to correct the configuration.`,
|
||||
platforms.Format(p), platforms.Format(imgPlat), name,
|
||||
)
|
||||
logrus.WithError(err).WithField("image", name).Warn("Ignoring error about platform mismatch where the manifest list points to an image whose configuration does not match the platform in the manifest.")
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if !system.IsOSSupported(img.OperatingSystem()) {
|
||||
return nil, system.ErrNotSupportedOperatingSystem
|
||||
}
|
||||
|
||||
imgDesc, err := i.resolveDescriptor(ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &imgDesc, err
|
||||
}
|
||||
|
||||
func newROLayerForImage(ctx context.Context, imgDesc *ocispec.Descriptor, i *ImageService, opts backend.GetImageAndLayerOptions, refOrID string, platform *ocispec.Platform) (builder.ROLayer, error) {
|
||||
if imgDesc == nil {
|
||||
return nil, fmt.Errorf("can't make an RO layer for a nil image :'(")
|
||||
}
|
||||
|
||||
platMatcher := platforms.Default()
|
||||
if platform != nil {
|
||||
platMatcher = platforms.Only(*platform)
|
||||
}
|
||||
|
||||
// this needs it's own context + lease so that it doesn't get cleaned before we're ready
|
||||
confDesc, err := containerdimages.Config(ctx, i.client.ContentStore(), *imgDesc, platMatcher)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
diffIDs, err := containerdimages.RootFS(ctx, i.client.ContentStore(), confDesc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parent := identity.ChainID(diffIDs).String()
|
||||
|
||||
s := i.client.SnapshotService(i.snapshotter)
|
||||
key := stringid.GenerateRandomID()
|
||||
ctx, _, err = i.client.WithLease(ctx, leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create lease for commit: %w", err)
|
||||
}
|
||||
mounts, err := s.View(ctx, key, parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tempMountLocation := os.TempDir()
|
||||
root, err := os.MkdirTemp(tempMountLocation, "rootfs-mount")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := mount.All(mounts, root); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &rolayer{
|
||||
key: key,
|
||||
c: i.client,
|
||||
snapshotter: i.snapshotter,
|
||||
diffID: digest.Digest(parent),
|
||||
root: root,
|
||||
contentStoreDigest: "",
|
||||
}, nil
|
||||
}
|
||||
|
||||
type rolayer struct {
|
||||
key string
|
||||
c *containerd.Client
|
||||
snapshotter string
|
||||
diffID digest.Digest
|
||||
root string
|
||||
contentStoreDigest digest.Digest
|
||||
}
|
||||
|
||||
func (rl *rolayer) ContentStoreDigest() digest.Digest {
|
||||
return rl.contentStoreDigest
|
||||
}
|
||||
|
||||
func (rl *rolayer) DiffID() layer.DiffID {
|
||||
if rl.diffID == "" {
|
||||
return layer.DigestSHA256EmptyTar
|
||||
}
|
||||
return layer.DiffID(rl.diffID)
|
||||
}
|
||||
|
||||
func (rl *rolayer) Release() error {
|
||||
snapshotter := rl.c.SnapshotService(rl.snapshotter)
|
||||
err := snapshotter.Remove(context.TODO(), rl.key)
|
||||
if err != nil && !cerrdefs.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if rl.root == "" { // nothing to release
|
||||
return nil
|
||||
}
|
||||
if err := mount.UnmountAll(rl.root, 0); err != nil {
|
||||
logrus.WithError(err).WithField("root", rl.root).Error("failed to unmount ROLayer")
|
||||
return err
|
||||
}
|
||||
if err := os.Remove(rl.root); err != nil {
|
||||
logrus.WithError(err).WithField("dir", rl.root).Error("failed to remove mount temp dir")
|
||||
return err
|
||||
}
|
||||
rl.root = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewRWLayer creates a new read-write layer for the builder
|
||||
func (rl *rolayer) NewRWLayer() (builder.RWLayer, error) {
|
||||
snapshotter := rl.c.SnapshotService(rl.snapshotter)
|
||||
|
||||
// we need this here for the prepared snapshots or
|
||||
// we'll have racy behaviour where sometimes they
|
||||
// will get GC'd before we commit/use them
|
||||
ctx, _, err := rl.c.WithLease(context.TODO(), leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create lease for commit: %w", err)
|
||||
}
|
||||
|
||||
key := stringid.GenerateRandomID()
|
||||
mounts, err := snapshotter.Prepare(ctx, key, rl.diffID.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
root, err := os.MkdirTemp(os.TempDir(), "rootfs-mount")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := mount.All(mounts, root); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &rwlayer{
|
||||
key: key,
|
||||
parent: rl.key,
|
||||
c: rl.c,
|
||||
snapshotter: rl.snapshotter,
|
||||
root: root,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type rwlayer struct {
|
||||
key string
|
||||
parent string
|
||||
c *containerd.Client
|
||||
snapshotter string
|
||||
root string
|
||||
}
|
||||
|
||||
func (rw *rwlayer) Root() string {
|
||||
return rw.root
|
||||
}
|
||||
|
||||
func (rw *rwlayer) Commit() (builder.ROLayer, error) {
|
||||
// we need this here for the prepared snapshots or
|
||||
// we'll have racy behaviour where sometimes they
|
||||
// will get GC'd before we commit/use them
|
||||
ctx, _, err := rw.c.WithLease(context.TODO(), leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create lease for commit: %w", err)
|
||||
}
|
||||
snapshotter := rw.c.SnapshotService(rw.snapshotter)
|
||||
|
||||
key := stringid.GenerateRandomID()
|
||||
err = snapshotter.Commit(ctx, key, rw.key)
|
||||
if err != nil && !cerrdefs.IsAlreadyExists(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
differ := rw.c.DiffService()
|
||||
desc, err := rootfs.CreateDiff(ctx, key, snapshotter, differ)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info, err := rw.c.ContentStore().Info(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
diffIDStr, ok := info.Labels["containerd.io/uncompressed"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("invalid differ response with no diffID")
|
||||
}
|
||||
diffID, err := digest.Parse(diffIDStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &rolayer{
|
||||
key: key,
|
||||
c: rw.c,
|
||||
snapshotter: rw.snapshotter,
|
||||
diffID: diffID,
|
||||
root: "",
|
||||
contentStoreDigest: desc.Digest,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (rw *rwlayer) Release() error {
|
||||
snapshotter := rw.c.SnapshotService(rw.snapshotter)
|
||||
err := snapshotter.Remove(context.TODO(), rw.key)
|
||||
if err != nil && !cerrdefs.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
if rw.root == "" { // nothing to release
|
||||
return nil
|
||||
}
|
||||
if err := mount.UnmountAll(rw.root, 0); err != nil {
|
||||
logrus.WithError(err).WithField("root", rw.root).Error("failed to unmount ROLayer")
|
||||
return err
|
||||
}
|
||||
if err := os.Remove(rw.root); err != nil {
|
||||
logrus.WithError(err).WithField("dir", rw.root).Error("failed to remove mount temp dir")
|
||||
return err
|
||||
}
|
||||
rw.root = ""
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateImage creates a new image by adding a config and ID to the image store.
|
||||
// This is similar to LoadImage() except that it receives JSON encoded bytes of
|
||||
// an image instead of a tar archive.
|
||||
func (i *ImageService) CreateImage(config []byte, parent string) (builder.Image, error) {
|
||||
return nil, errdefs.NotImplemented(errors.New("not implemented"))
|
||||
func (i *ImageService) CreateImage(ctx context.Context, config []byte, parent string, layerDigest digest.Digest) (builder.Image, error) {
|
||||
imgToCreate, err := dimage.NewFromJSON(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rootfs := ocispec.RootFS{
|
||||
Type: imgToCreate.RootFS.Type,
|
||||
DiffIDs: []digest.Digest{},
|
||||
}
|
||||
for _, diffId := range imgToCreate.RootFS.DiffIDs {
|
||||
rootfs.DiffIDs = append(rootfs.DiffIDs, digest.Digest(diffId))
|
||||
}
|
||||
exposedPorts := make(map[string]struct{}, len(imgToCreate.Config.ExposedPorts))
|
||||
for k, v := range imgToCreate.Config.ExposedPorts {
|
||||
exposedPorts[string(k)] = v
|
||||
}
|
||||
|
||||
var ociHistory []ocispec.History
|
||||
for _, history := range imgToCreate.History {
|
||||
created := history.Created
|
||||
ociHistory = append(ociHistory, ocispec.History{
|
||||
Created: &created,
|
||||
CreatedBy: history.CreatedBy,
|
||||
Author: history.Author,
|
||||
Comment: history.Comment,
|
||||
EmptyLayer: history.EmptyLayer,
|
||||
})
|
||||
}
|
||||
|
||||
// make an ocispec.Image from the docker/image.Image
|
||||
ociImgToCreate := ocispec.Image{
|
||||
Created: &imgToCreate.Created,
|
||||
Author: imgToCreate.Author,
|
||||
Architecture: imgToCreate.Architecture,
|
||||
Variant: imgToCreate.Variant,
|
||||
OS: imgToCreate.OS,
|
||||
OSVersion: imgToCreate.OSVersion,
|
||||
OSFeatures: imgToCreate.OSFeatures,
|
||||
Config: ocispec.ImageConfig{
|
||||
User: imgToCreate.Config.User,
|
||||
ExposedPorts: exposedPorts,
|
||||
Env: imgToCreate.Config.Env,
|
||||
Entrypoint: imgToCreate.Config.Entrypoint,
|
||||
Cmd: imgToCreate.Config.Cmd,
|
||||
Volumes: imgToCreate.Config.Volumes,
|
||||
WorkingDir: imgToCreate.Config.WorkingDir,
|
||||
Labels: imgToCreate.Config.Labels,
|
||||
StopSignal: imgToCreate.Config.StopSignal,
|
||||
},
|
||||
RootFS: rootfs,
|
||||
History: ociHistory,
|
||||
}
|
||||
|
||||
var layers []ocispec.Descriptor
|
||||
// if the image has a parent, we need to start with the parents layers descriptors
|
||||
if parent != "" {
|
||||
parentDesc, err := i.resolveDescriptor(ctx, parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parentImageManifest, err := containerdimages.Manifest(ctx, i.client.ContentStore(), parentDesc, platforms.Default())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
layers = parentImageManifest.Layers
|
||||
}
|
||||
|
||||
// get the info for the new layers
|
||||
info, err := i.client.ContentStore().Info(ctx, layerDigest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// append the new layer descriptor
|
||||
layers = append(layers,
|
||||
ocispec.Descriptor{
|
||||
MediaType: containerdimages.MediaTypeDockerSchema2LayerGzip,
|
||||
Digest: layerDigest,
|
||||
Size: info.Size,
|
||||
},
|
||||
)
|
||||
|
||||
// necessary to prevent the contents from being GC'd
|
||||
// between writing them here and creating an image
|
||||
ctx, done, err := i.client.WithLease(ctx, leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer done(ctx)
|
||||
|
||||
commitManifestDesc, err := writeContentsForImage(ctx, i.snapshotter, i.client.ContentStore(), ociImgToCreate, layers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// image create
|
||||
img := containerdimages.Image{
|
||||
Name: danglingImageName(commitManifestDesc.Digest),
|
||||
Target: commitManifestDesc,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
createdImage, err := i.client.ImageService().Update(ctx, img)
|
||||
if err != nil {
|
||||
if !cerrdefs.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if createdImage, err = i.client.ImageService().Create(ctx, img); err != nil {
|
||||
return nil, fmt.Errorf("failed to create new image: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := i.unpackImage(ctx, createdImage, platforms.DefaultSpec()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
newImage := dimage.NewImage(dimage.ID(createdImage.Target.Digest))
|
||||
newImage.V1Image = imgToCreate.V1Image
|
||||
newImage.V1Image.ID = string(createdImage.Target.Digest)
|
||||
newImage.History = imgToCreate.History
|
||||
return newImage, nil
|
||||
}
|
||||
|
|
|
@ -127,3 +127,71 @@ func isRootfsChildOf(child ocispec.RootFS, parent ocispec.RootFS) bool {
|
|||
|
||||
return true
|
||||
}
|
||||
|
||||
// parents returns a slice of image IDs whose entire rootfs contents match,
|
||||
// in order, the childs first layers, excluding images with the exact same
|
||||
// rootfs.
|
||||
//
|
||||
// Called from image_delete.go to prune dangling parents.
|
||||
func (i *ImageService) parents(ctx context.Context, id image.ID) ([]imageWithRootfs, error) {
|
||||
target, err := i.resolveDescriptor(ctx, id.String())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get child image")
|
||||
}
|
||||
|
||||
cs := i.client.ContentStore()
|
||||
|
||||
allPlatforms, err := containerdimages.Platforms(ctx, cs, target)
|
||||
if err != nil {
|
||||
return nil, errdefs.System(errors.Wrap(err, "failed to list platforms supported by image"))
|
||||
}
|
||||
|
||||
var childRootFS []ocispec.RootFS
|
||||
for _, platform := range allPlatforms {
|
||||
rootfs, err := platformRootfs(ctx, cs, target, platform)
|
||||
if err != nil {
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return nil, errdefs.System(errors.Wrap(err, "failed to get platform-specific rootfs"))
|
||||
}
|
||||
|
||||
childRootFS = append(childRootFS, rootfs)
|
||||
}
|
||||
|
||||
imgs, err := i.client.ImageService().List(ctx)
|
||||
if err != nil {
|
||||
return nil, errdefs.System(errors.Wrap(err, "failed to list all images"))
|
||||
}
|
||||
|
||||
var parents []imageWithRootfs
|
||||
for _, img := range imgs {
|
||||
nextImage:
|
||||
for _, platform := range allPlatforms {
|
||||
rootfs, err := platformRootfs(ctx, cs, img.Target, platform)
|
||||
if err != nil {
|
||||
if cerrdefs.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return nil, errdefs.System(errors.Wrap(err, "failed to get platform-specific rootfs"))
|
||||
}
|
||||
|
||||
for _, childRoot := range childRootFS {
|
||||
if isRootfsChildOf(childRoot, rootfs) {
|
||||
parents = append(parents, imageWithRootfs{
|
||||
img: img,
|
||||
rootfs: rootfs,
|
||||
})
|
||||
break nextImage
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return parents, nil
|
||||
}
|
||||
|
||||
type imageWithRootfs struct {
|
||||
img containerdimages.Image
|
||||
rootfs ocispec.RootFS
|
||||
}
|
||||
|
|
|
@ -6,9 +6,9 @@ import (
|
|||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
|
@ -19,7 +19,6 @@ import (
|
|||
"github.com/containerd/containerd/rootfs"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
"github.com/docker/docker/api/types/backend"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
|
@ -142,10 +141,11 @@ func generateCommitImageConfig(baseConfig ocispec.Image, diffID digest.Digest, o
|
|||
DiffIDs: append(baseConfig.RootFS.DiffIDs, diffID),
|
||||
},
|
||||
History: append(baseConfig.History, ocispec.History{
|
||||
Created: &createdTime,
|
||||
CreatedBy: "", // FIXME(ndeloof) ?
|
||||
Author: opts.Author,
|
||||
Comment: opts.Comment,
|
||||
Created: &createdTime,
|
||||
CreatedBy: strings.Join(opts.ContainerConfig.Cmd, " "),
|
||||
Author: opts.Author,
|
||||
Comment: opts.Comment,
|
||||
// TODO(laurazard): this check might be incorrect
|
||||
EmptyLayer: diffID == "",
|
||||
}),
|
||||
}
|
||||
|
@ -297,5 +297,13 @@ func uniquePart() string {
|
|||
//
|
||||
// This is a temporary shim. Should be removed when builder stops using commit.
|
||||
func (i *ImageService) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) {
|
||||
return "", errdefs.NotImplemented(errors.New("not implemented"))
|
||||
ctr := i.containers.Get(c.ContainerID)
|
||||
if ctr == nil {
|
||||
// TODO: use typed error
|
||||
return "", fmt.Errorf("container not found: %s", c.ContainerID)
|
||||
}
|
||||
c.ContainerMountLabel = ctr.MountLabel
|
||||
c.ContainerOS = ctr.OS
|
||||
c.ParentImageID = string(ctr.ImageID)
|
||||
return i.CommitImage(ctx, c)
|
||||
}
|
||||
|
|
|
@ -2,10 +2,16 @@ package containerd
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/container"
|
||||
"github.com/docker/docker/image"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
@ -30,8 +36,6 @@ import (
|
|||
// are divided into two categories grouped by their severity:
|
||||
//
|
||||
// Hard Conflict:
|
||||
// - a pull or build using the image.
|
||||
// - any descendant image.
|
||||
// - any running container using the image.
|
||||
//
|
||||
// Soft Conflict:
|
||||
|
@ -45,8 +49,6 @@ import (
|
|||
// meaning any delete conflicts will cause the image to not be deleted and the
|
||||
// conflict will not be reported.
|
||||
//
|
||||
// TODO(thaJeztah): implement ImageDelete "force" options; see https://github.com/moby/moby/issues/43850
|
||||
// TODO(thaJeztah): implement ImageDelete "prune" options; see https://github.com/moby/moby/issues/43849
|
||||
// TODO(thaJeztah): image delete should send prometheus counters; see https://github.com/moby/moby/issues/45268
|
||||
func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) {
|
||||
parsedRef, err := reference.ParseNormalizedNamed(imageRef)
|
||||
|
@ -59,28 +61,278 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force,
|
|||
return nil, err
|
||||
}
|
||||
|
||||
possiblyDeletedConfigs := map[digest.Digest]struct{}{}
|
||||
if err := i.walkPresentChildren(ctx, img.Target, func(_ context.Context, d ocispec.Descriptor) {
|
||||
if images.IsConfigType(d.MediaType) {
|
||||
possiblyDeletedConfigs[d.Digest] = struct{}{}
|
||||
}
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
imgID := image.ID(img.Target.Digest)
|
||||
|
||||
if isImageIDPrefix(imgID.String(), imageRef) {
|
||||
return i.deleteAll(ctx, img, force, prune)
|
||||
}
|
||||
|
||||
err = i.client.ImageService().Delete(ctx, img.Name, images.SynchronousDelete())
|
||||
singleRef, err := i.isSingleReference(ctx, img)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Workaround for: https://github.com/moby/buildkit/issues/3797
|
||||
if err := i.unleaseSnapshotsFromDeletedConfigs(context.Background(), possiblyDeletedConfigs); err != nil {
|
||||
logrus.WithError(err).Warn("failed to unlease snapshots")
|
||||
if !singleRef {
|
||||
err := i.client.ImageService().Delete(ctx, img.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i.LogImageEvent(imgID.String(), imgID.String(), "untag")
|
||||
records := []types.ImageDeleteResponseItem{{Untagged: reference.FamiliarString(reference.TagNameOnly(parsedRef))}}
|
||||
return records, nil
|
||||
}
|
||||
|
||||
imgID := string(img.Target.Digest)
|
||||
i.LogImageEvent(imgID, imgID, "untag")
|
||||
i.LogImageEvent(imgID, imgID, "delete")
|
||||
using := func(c *container.Container) bool {
|
||||
return c.ImageID == imgID
|
||||
}
|
||||
ctr := i.containers.First(using)
|
||||
if ctr != nil {
|
||||
if !force {
|
||||
// If we removed the repository reference then
|
||||
// this image would remain "dangling" and since
|
||||
// we really want to avoid that the client must
|
||||
// explicitly force its removal.
|
||||
refString := reference.FamiliarString(reference.TagNameOnly(parsedRef))
|
||||
err := &imageDeleteConflict{
|
||||
reference: refString,
|
||||
used: true,
|
||||
message: fmt.Sprintf("container %s is using its referenced image %s",
|
||||
stringid.TruncateID(ctr.ID),
|
||||
stringid.TruncateID(imgID.String())),
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return []types.ImageDeleteResponseItem{{Untagged: reference.FamiliarString(parsedRef)}}, nil
|
||||
err := i.softImageDelete(ctx, img)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
i.LogImageEvent(imgID.String(), imgID.String(), "untag")
|
||||
records := []types.ImageDeleteResponseItem{{Untagged: reference.FamiliarString(reference.TagNameOnly(parsedRef))}}
|
||||
return records, nil
|
||||
}
|
||||
|
||||
return i.deleteAll(ctx, img, force, prune)
|
||||
}
|
||||
|
||||
// deleteAll deletes the image from the daemon, and if prune is true,
|
||||
// also deletes dangling parents if there is no conflict in doing so.
|
||||
// Parent images are removed quietly, and if there is any issue/conflict
|
||||
// it is logged but does not halt execution/an error is not returned.
|
||||
func (i *ImageService) deleteAll(ctx context.Context, img images.Image, force, prune bool) ([]types.ImageDeleteResponseItem, error) {
|
||||
var records []types.ImageDeleteResponseItem
|
||||
|
||||
// Workaround for: https://github.com/moby/buildkit/issues/3797
|
||||
possiblyDeletedConfigs := map[digest.Digest]struct{}{}
|
||||
err := i.walkPresentChildren(ctx, img.Target, func(_ context.Context, d ocispec.Descriptor) {
|
||||
if images.IsConfigType(d.MediaType) {
|
||||
possiblyDeletedConfigs[d.Digest] = struct{}{}
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := i.unleaseSnapshotsFromDeletedConfigs(context.Background(), possiblyDeletedConfigs); err != nil {
|
||||
logrus.WithError(err).Warn("failed to unlease snapshots")
|
||||
}
|
||||
}()
|
||||
|
||||
imgID := img.Target.Digest.String()
|
||||
|
||||
var parents []imageWithRootfs
|
||||
if prune {
|
||||
parents, err = i.parents(ctx, image.ID(imgID))
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warn("failed to get image parents")
|
||||
}
|
||||
sortParentsByAffinity(parents)
|
||||
}
|
||||
|
||||
imageRefs, err := i.client.ImageService().List(ctx, "target.digest=="+imgID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, imageRef := range imageRefs {
|
||||
if err := i.imageDeleteHelper(ctx, imageRef, &records, force); err != nil {
|
||||
return records, err
|
||||
}
|
||||
}
|
||||
i.LogImageEvent(imgID, imgID, "delete")
|
||||
records = append(records, types.ImageDeleteResponseItem{Deleted: imgID})
|
||||
|
||||
for _, parent := range parents {
|
||||
if !isDanglingImage(parent.img) {
|
||||
break
|
||||
}
|
||||
err = i.imageDeleteHelper(ctx, parent.img, &records, false)
|
||||
if err != nil {
|
||||
logrus.WithError(err).Warn("failed to remove image parent")
|
||||
break
|
||||
}
|
||||
parentID := parent.img.Target.Digest.String()
|
||||
i.LogImageEvent(parentID, parentID, "delete")
|
||||
records = append(records, types.ImageDeleteResponseItem{Deleted: parentID})
|
||||
}
|
||||
|
||||
return records, nil
|
||||
}
|
||||
|
||||
// isImageIDPrefix returns whether the given
|
||||
// possiblePrefix is a prefix of the given imageID.
|
||||
func isImageIDPrefix(imageID, possiblePrefix string) bool {
|
||||
if strings.HasPrefix(imageID, possiblePrefix) {
|
||||
return true
|
||||
}
|
||||
if i := strings.IndexRune(imageID, ':'); i >= 0 {
|
||||
return strings.HasPrefix(imageID[i+1:], possiblePrefix)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func sortParentsByAffinity(parents []imageWithRootfs) {
|
||||
sort.Slice(parents, func(i, j int) bool {
|
||||
lenRootfsI := len(parents[i].rootfs.DiffIDs)
|
||||
lenRootfsJ := len(parents[j].rootfs.DiffIDs)
|
||||
if lenRootfsI == lenRootfsJ {
|
||||
return isDanglingImage(parents[i].img)
|
||||
}
|
||||
return lenRootfsI > lenRootfsJ
|
||||
})
|
||||
}
|
||||
|
||||
// isSingleReference returns true if there are no other images in the
|
||||
// daemon targeting the same content as `img` that are not dangling.
|
||||
func (i *ImageService) isSingleReference(ctx context.Context, img images.Image) (bool, error) {
|
||||
refs, err := i.client.ImageService().List(ctx, "target.digest=="+img.Target.Digest.String())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, ref := range refs {
|
||||
if !isDanglingImage(ref) && ref.Name != img.Name {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
type conflictType int
|
||||
|
||||
const (
|
||||
conflictRunningContainer conflictType = 1 << iota
|
||||
conflictActiveReference
|
||||
conflictStoppedContainer
|
||||
conflictHard = conflictRunningContainer
|
||||
conflictSoft = conflictActiveReference | conflictStoppedContainer
|
||||
)
|
||||
|
||||
// imageDeleteHelper attempts to delete the given image from this daemon.
|
||||
// If the image has any hard delete conflicts (running containers using
|
||||
// the image) then it cannot be deleted. If the image has any soft delete
|
||||
// conflicts (any tags/digests referencing the image or any stopped container
|
||||
// using the image) then it can only be deleted if force is true. Any deleted
|
||||
// images and untagged references are appended to the given records. If any
|
||||
// error or conflict is encountered, it will be returned immediately without
|
||||
// deleting the image.
|
||||
func (i *ImageService) imageDeleteHelper(ctx context.Context, img images.Image, records *[]types.ImageDeleteResponseItem, force bool) error {
|
||||
// First, determine if this image has any conflicts. Ignore soft conflicts
|
||||
// if force is true.
|
||||
c := conflictHard
|
||||
if !force {
|
||||
c |= conflictSoft
|
||||
}
|
||||
|
||||
imgID := image.ID(img.Target.Digest)
|
||||
|
||||
err := i.checkImageDeleteConflict(ctx, imgID, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
untaggedRef, err := reference.ParseAnyReference(img.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = i.client.ImageService().Delete(ctx, img.Name, images.SynchronousDelete())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
i.LogImageEvent(imgID.String(), imgID.String(), "untag")
|
||||
*records = append(*records, types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(untaggedRef)})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ImageDeleteConflict holds a soft or hard conflict and associated
|
||||
// error. A hard conflict represents a running container using the
|
||||
// image, while a soft conflict is any tags/digests referencing the
|
||||
// given image or any stopped container using the image.
|
||||
// Implements the error interface.
|
||||
type imageDeleteConflict struct {
|
||||
hard bool
|
||||
used bool
|
||||
reference string
|
||||
message string
|
||||
}
|
||||
|
||||
func (idc *imageDeleteConflict) Error() string {
|
||||
var forceMsg string
|
||||
if idc.hard {
|
||||
forceMsg = "cannot be forced"
|
||||
} else {
|
||||
forceMsg = "must be forced"
|
||||
}
|
||||
return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", idc.reference, forceMsg, idc.message)
|
||||
}
|
||||
|
||||
func (imageDeleteConflict) Conflict() {}
|
||||
|
||||
// checkImageDeleteConflict returns a conflict representing
|
||||
// any issue preventing deletion of the given image ID, and
|
||||
// nil if there are none. It takes a bitmask representing a
|
||||
// filter for which conflict types the caller cares about,
|
||||
// and will only check for these conflict types.
|
||||
func (i *ImageService) checkImageDeleteConflict(ctx context.Context, imgID image.ID, mask conflictType) error {
|
||||
if mask&conflictRunningContainer != 0 {
|
||||
running := func(c *container.Container) bool {
|
||||
return c.ImageID == imgID && c.IsRunning()
|
||||
}
|
||||
if ctr := i.containers.First(running); ctr != nil {
|
||||
return &imageDeleteConflict{
|
||||
reference: stringid.TruncateID(imgID.String()),
|
||||
hard: true,
|
||||
used: true,
|
||||
message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(ctr.ID)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if mask&conflictStoppedContainer != 0 {
|
||||
stopped := func(c *container.Container) bool {
|
||||
return !c.IsRunning() && c.ImageID == imgID
|
||||
}
|
||||
if ctr := i.containers.First(stopped); ctr != nil {
|
||||
return &imageDeleteConflict{
|
||||
reference: stringid.TruncateID(imgID.String()),
|
||||
used: true,
|
||||
message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(ctr.ID)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if mask&conflictActiveReference != 0 {
|
||||
refs, err := i.client.ImageService().List(ctx, "target.digest=="+imgID.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(refs) > 1 {
|
||||
return &imageDeleteConflict{
|
||||
reference: stringid.TruncateID(imgID.String()),
|
||||
message: "image is referenced in multiple repositories",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -90,13 +90,16 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imaget
		return nil, err
	}

-	tags := make([]string, len(tagged))
-	for i, t := range tagged {
+	var tags []string
+	for _, t := range tagged {
+		if isDanglingImage(t) {
+			continue
+		}
		name, err := reference.ParseNamed(t.Name)
		if err != nil {
			return nil, err
		}
-		tags[i] = reference.FamiliarString(name)
+		tags = append(tags, reference.FamiliarString(name))
	}
	history[0].Tags = tags
}
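The switch from a pre-sized slice to append matters once entries can be skipped: with make(len) a skipped dangling image would leave an empty string in the tag list. A small self-contained illustration of that difference (the data is made up for the example):

package main

import "fmt"

func main() {
	src := []string{"repo:tag", "<dangling>", "other:tag"}

	// Pre-sized slice: skipped entries leave zero-value holes behind.
	withHoles := make([]string, len(src))
	for i, s := range src {
		if s == "<dangling>" {
			continue
		}
		withHoles[i] = s
	}

	// Append-based slice: skipped entries simply never appear.
	var compact []string
	for _, s := range src {
		if s == "<dangling>" {
			continue
		}
		compact = append(compact, s)
	}

	fmt.Println(withHoles) // [repo:tag  other:tag] – note the empty slot
	fmt.Println(compact)   // [repo:tag other:tag]
}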
@ -525,9 +525,12 @@ func getManifestPlatform(ctx context.Context, store content.Provider, manifestDe
	return platforms.Normalize(platform), nil
}

-// isImageManifest returns true if the manifest has any layer that is a known image layer.
+// isImageManifest returns true if the manifest has no layers or any of its layers is a known image layer.
// Some manifests use the image media type for compatibility, even if they are not a real image.
func isImageManifest(mfst v1.Manifest) bool {
+	if len(mfst.Layers) == 0 {
+		return true
+	}
	for _, l := range mfst.Layers {
		if images.IsLayerType(l.MediaType) {
			return true
@ -152,7 +152,7 @@ func (p pullProgress) UpdateProgress(ctx context.Context, ongoing *jobs, out pro
	} else if p.ShowExists {
		out.WriteProgress(progress.Progress{
			ID:         stringid.TruncateID(j.Digest.Encoded()),
-			Action:     "Exists",
+			Action:     "Already exists",
			HideCounts: true,
			LastUpdate: true,
		})
@ -24,7 +24,12 @@ func (i *ImageService) newResolverFromAuthConfig(authConfig *registrytypes.AuthC
	}), tracker
}

-func hostsWrapper(hostsFn docker.RegistryHosts, authConfig *registrytypes.AuthConfig, regService RegistryConfigProvider) docker.RegistryHosts {
+func hostsWrapper(hostsFn docker.RegistryHosts, optAuthConfig *registrytypes.AuthConfig, regService RegistryConfigProvider) docker.RegistryHosts {
+	var authorizer docker.Authorizer
+	if optAuthConfig != nil {
+		authorizer = docker.NewDockerAuthorizer(authorizationCredsFromAuthConfig(*optAuthConfig))
+	}
+
	return func(n string) ([]docker.RegistryHost, error) {
		hosts, err := hostsFn(n)
		if err != nil {
@ -33,12 +38,7 @@ func hostsWrapper(hostsFn docker.RegistryHosts, authConfig *registrytypes.AuthCo
	for i := range hosts {
		if hosts[i].Authorizer == nil {
-			var opts []docker.AuthorizerOpt
-			if authConfig != nil {
-				opts = append(opts, authorizationCredsFromAuthConfig(*authConfig))
-			}
-			hosts[i].Authorizer = docker.NewDockerAuthorizer(opts...)

+			hosts[i].Authorizer = authorizer
			isInsecure := regService.IsInsecureRegistry(hosts[i].Host)
			if hosts[i].Client.Transport != nil && isInsecure {
				hosts[i].Client.Transport = httpFallback{super: hosts[i].Client.Transport}
@ -51,13 +51,16 @@ func hostsWrapper(hostsFn docker.RegistryHosts, authConfig *registrytypes.AuthCo
func authorizationCredsFromAuthConfig(authConfig registrytypes.AuthConfig) docker.AuthorizerOpt {
	cfgHost := registry.ConvertToHostname(authConfig.ServerAddress)
-	if cfgHost == registry.IndexHostname {
+	if cfgHost == "" || cfgHost == registry.IndexHostname {
		cfgHost = registry.DefaultRegistryHost
	}

	return docker.WithAuthCreds(func(host string) (string, string, error) {
		if cfgHost != host {
-			logrus.WithField("host", host).WithField("cfgHost", cfgHost).Warn("Host doesn't match")
+			logrus.WithFields(logrus.Fields{
+				"host":    host,
+				"cfgHost": cfgHost,
+			}).Warn("Host doesn't match")
			return "", "", nil
		}
		if authConfig.IdentityToken != "" {
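The refactored wrapper now builds a single authorizer up front from the optional auth config and shares it across every registry host, instead of constructing a fresh one per host. As a standalone sketch of that construction, using the same containerd helpers this file already imports (the credentials and host name below are placeholders, not values from the change):

package main

import (
	"fmt"

	"github.com/containerd/containerd/remotes/docker"
)

func main() {
	// Build the authorizer once; every host can then share it.
	authorizer := docker.NewDockerAuthorizer(
		docker.WithAuthCreds(func(host string) (string, string, error) {
			// Placeholder credentials for illustration only.
			if host == "registry-1.docker.io" {
				return "username", "password", nil
			}
			return "", "", nil // no credentials for other hosts
		}),
	)

	host := docker.RegistryHost{
		Host:       "registry-1.docker.io",
		Scheme:     "https",
		Path:       "/v2",
		Authorizer: authorizer,
	}
	fmt.Println("configured host:", host.Host)
}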
@ -10,6 +10,7 @@ import (
	"github.com/containerd/containerd/plugin"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/containerd/containerd/snapshots"
	"github.com/docker/distribution/reference"
	imagetypes "github.com/docker/docker/api/types/image"
	"github.com/docker/docker/container"
	daemonevents "github.com/docker/docker/daemon/events"
@ -17,6 +18,7 @@ import (
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/registry"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -41,6 +43,7 @@ type RegistryHostsProvider interface {
type RegistryConfigProvider interface {
	IsInsecureRegistry(host string) bool
	ResolveRepository(name reference.Named) (*registry.RepositoryInfo, error)
}

type ImageServiceConfig struct {
@ -129,6 +129,13 @@ type Daemon struct {
	// It stores metadata for the content store (used for manifest caching)
	// This needs to be closed on daemon exit
	mdDB *bbolt.DB

	usesSnapshotter bool
}

// ID returns the daemon id
func (daemon *Daemon) ID() string {
	return daemon.id
}

// StoreHosts stores the addresses the daemon is listening on
@ -153,16 +160,7 @@ func (daemon *Daemon) Features() *map[string]bool {
// UsesSnapshotter returns true if feature flag to use containerd snapshotter is enabled
func (daemon *Daemon) UsesSnapshotter() bool {
-	// TEST_INTEGRATION_USE_SNAPSHOTTER is used for integration tests only.
-	if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
-		return true
-	}
-	if daemon.configStore.Features != nil {
-		if b, ok := daemon.configStore.Features["containerd-snapshotter"]; ok {
-			return b
-		}
-	}
-	return false
+	return daemon.usesSnapshotter
}

// RegistryHosts returns registry configuration in containerd resolvers format
@ -796,6 +794,13 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S
		startupDone: make(chan struct{}),
	}

	// TEST_INTEGRATION_USE_SNAPSHOTTER is used for integration tests only.
	if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
		d.usesSnapshotter = true
	} else {
		d.usesSnapshotter = config.Features["containerd-snapshotter"]
	}

	// Ensure the daemon is properly shutdown if there is a failure during
	// initialization
	defer func() {
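With this change the snapshotter decision is made once at daemon construction and cached in usesSnapshotter, rather than being re-evaluated on every UsesSnapshotter call. The precedence it captures, restated as a small standalone function (this is an illustration of the rule above, not daemon code):

package main

import (
	"fmt"
	"os"
)

// resolveSnapshotterFlag mirrors the precedence used in NewDaemon above:
// the integration-test environment variable wins, otherwise the value of
// the "containerd-snapshotter" feature flag (false when unset) is used.
func resolveSnapshotterFlag(features map[string]bool) bool {
	if os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != "" {
		return true
	}
	return features["containerd-snapshotter"]
}

func main() {
	fmt.Println(resolveSnapshotterFlag(map[string]bool{"containerd-snapshotter": true})) // true
	fmt.Println(resolveSnapshotterFlag(nil))                                             // false unless the env var is set
}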
@ -9,7 +9,6 @@ import (
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/daemon/graphdriver/graphtest"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/reexec"
)

func init() {
@ -17,8 +16,6 @@ func init() {
	// errors or hangs to be debugged directly from the test process.
	untar = archive.UntarUncompressed
	graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer

	reexec.Init()
}

// This avoids creating a new driver for each test if all tests are run
@ -10,7 +10,6 @@ import (
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/daemon/graphdriver/graphtest"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/reexec"
)

func init() {
@ -18,8 +17,6 @@ func init() {
	// errors or hangs to be debugged directly from the test process.
	untar = archive.UntarUncompressed
	graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer

	reexec.Init()
}

func skipIfNaive(t *testing.T) {
@ -7,14 +7,8 @@ import (
	"testing"

	"github.com/docker/docker/daemon/graphdriver/graphtest"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	reexec.Init()
}

// This avoids creating a new driver for each test if all tests are run
// Make sure to put new tests between TestVfsSetup and TestVfsTeardown
func TestVfsSetup(t *testing.T) {
@ -16,6 +16,7 @@ import (
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/archive"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
@ -27,7 +28,7 @@ type ImageService interface {
	PullImage(ctx context.Context, name, tag string, platform *v1.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
	PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
-	CreateImage(config []byte, parent string) (builder.Image, error)
+	CreateImage(ctx context.Context, config []byte, parent string, contentStoreDigest digest.Digest) (builder.Image, error)
	ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error)
	ExportImage(ctx context.Context, names []string, outStream io.Writer) error
	PerformWithBaseFS(ctx context.Context, c *container.Container, fn func(string) error) error
@ -19,6 +19,7 @@ import (
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/system"
	registrypkg "github.com/docker/docker/registry"
	"github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
@ -30,6 +31,10 @@ type roLayer struct {
	roLayer layer.Layer
}

func (l *roLayer) ContentStoreDigest() digest.Digest {
	return ""
}

func (l *roLayer) DiffID() layer.DiffID {
	if l.roLayer == nil {
		return layer.DigestSHA256EmptyTar
@ -241,7 +246,7 @@ func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID s
// CreateImage creates a new image by adding a config and ID to the image store.
// This is similar to LoadImage() except that it receives JSON encoded bytes of
// an image instead of a tar archive.
-func (i *ImageService) CreateImage(config []byte, parent string) (builder.Image, error) {
+func (i *ImageService) CreateImage(ctx context.Context, config []byte, parent string, _ digest.Digest) (builder.Image, error) {
	id, err := i.imageStore.Create(config)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create image")
@ -63,7 +63,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tag string, platfor
	// we allow the image to have a non-matching architecture. The code
	// below checks for this situation, and returns a warning to the client,
	// as well as logging it to the daemon logs.
-	img, err := i.GetImage(ctx, image, imagetypes.GetImageOpts{Platform: platform})
+	img, err := i.GetImage(ctx, ref.String(), imagetypes.GetImageOpts{Platform: platform})

	// Note that this is a special case where GetImage returns both an image
	// and an error: https://github.com/docker/docker/blob/v20.10.7/daemon/images/image.go#L175-L183
@ -78,7 +78,8 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
	}

	attributes := map[string]string{
-		"exitCode": strconv.Itoa(exitStatus.ExitCode),
+		"exitCode":     strconv.Itoa(exitStatus.ExitCode),
+		"execDuration": strconv.Itoa(int(execDuration.Seconds())),
	}
	daemon.Cleanup(c)
@ -63,10 +63,10 @@ func (daemon *Daemon) Reload(conf *config.Config) (err error) {
	if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil {
		return err
	}
-	if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil {
+	if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil {
		return err
	}
-	if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil {
+	if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil {
		return err
	}
	if err := daemon.reloadLiveRestore(conf, attributes); err != nil {
@ -238,13 +238,19 @@ func TestDaemonReloadInsecureRegistries(t *testing.T) {
		"docker3.example.com", // this will be newly added
	}

	mirrors := []string{
		"https://mirror.test.example.com",
	}

	valuesSets := make(map[string]interface{})
	valuesSets["insecure-registries"] = insecureRegistries
	valuesSets["registry-mirrors"] = mirrors

	newConfig := &config.Config{
		CommonConfig: config.CommonConfig{
			ServiceOptions: registry.ServiceOptions{
				InsecureRegistries: insecureRegistries,
				Mirrors:            mirrors,
			},
			ValuesSet: valuesSets,
		},
@ -59,6 +59,11 @@ variable "GITHUB_SHA" {
  default = ""
}

# Special target: https://github.com/docker/metadata-action#bake-definition
target "docker-metadata-action" {
  tags = ["moby-bin:local"]
}

# Defines the output folder
variable "DESTDIR" {
  default = ""
@ -152,6 +157,29 @@ target "all-cross" {
  inherits = ["all", "_platforms"]
}

#
# bin image
#

target "bin-image" {
  inherits = ["all", "docker-metadata-action"]
  output = ["type=docker"]
}

target "bin-image-cross" {
  inherits = ["bin-image"]
  output = ["type=image"]
  platforms = [
    "linux/amd64",
    "linux/arm/v6",
    "linux/arm/v7",
    "linux/arm64",
    "linux/ppc64le",
    "linux/s390x",
    "windows/amd64"
  ]
}

#
# dev
#
@ -23,9 +23,9 @@ keywords: "API, Docker, rcli, REST, documentation"
* `GET /images/json` no longer includes hardcoded `<none>:<none>` and
  `<none>@<none>` in `RepoTags` and `RepoDigests` for untagged images.
  In such cases, empty arrays will be produced instead.
-* The `VirtualSize` field in the `GET /images/{name}/json` and `GET /images//json`
-  responses is deprecated and will no longer be included in API v1.44. Use the
-  `Size` field instead, which contains the same information.
+* The `VirtualSize` field in the `GET /images/{name}/json`, `GET /images/json`,
+  and `GET /system/df` responses is deprecated and will no longer be included
+  in API v1.44. Use the `Size` field instead, which contains the same information.
* `GET /info` now includes `no-new-privileges` in the `SecurityOptions` string
  list when this option is enabled globally. This change is not versioned, and
  affects all API versions if the daemon has this patch.
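For API consumers, the practical takeaway is to read `Size` and stop relying on `VirtualSize`. A rough sketch of what that looks like with the Docker Go client of roughly this era ("busybox:latest" is just an example reference):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	inspect, _, err := cli.ImageInspectWithRaw(context.Background(), "busybox:latest")
	if err != nil {
		panic(err)
	}

	// Prefer Size; VirtualSize is deprecated and carries the same value here.
	fmt.Printf("image %s size: %d bytes\n", inspect.ID, inspect.Size)
}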
@ -15,7 +15,7 @@ set -e
# the binary version you may also need to update the vendor version to pick up
# bug fixes or new APIs, however, usually the Go packages are built from a
# commit from the master branch.
-: "${CONTAINERD_VERSION:=v1.7.0}"
+: "${CONTAINERD_VERSION:=v1.7.1}"

install_containerd() (
    echo "Install containerd version $CONTAINERD_VERSION"
@ -51,6 +51,18 @@ source "${MAKEDIR}/.go-autogen"
    fi
fi

# XXX: Disable netgo on Windows and use Windows' system resolver instead.
#
# go1.19 and newer added support for netgo on Windows (https://go.dev/doc/go1.19#net),
# which won't ask Windows for DNS results, and hence may be ignoring
# custom "C:\Windows\System32\drivers\etc\hosts".
# See https://github.com/moby/moby/issues/45251#issuecomment-1561001817
# https://github.com/moby/moby/issues/45251, and
# https://go-review.googlesource.com/c/go/+/467335
if [ "$(go env GOOS)" = "windows" ]; then
    BUILDFLAGS=("${BUILDFLAGS[@]/netgo/}")
fi

# only necessary for non-sandboxed invocation where TARGETPLATFORM is empty
PLATFORM_NAME=$TARGETPLATFORM
if [ -z "$PLATFORM_NAME" ]; then
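For context on what the netgo tag changes: with the pure-Go resolver the process never asks the OS for DNS answers. The sketch below is not part of the build script; it just shows, under Go's standard library, how the pure-Go resolver can be selected explicitly, which is roughly what netgo makes the default:

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	// PreferGo forces Go's built-in DNS resolver. It bypasses the OS
	// resolver, which is why hosts-file behaviour can differ on Windows.
	goResolver := &net.Resolver{PreferGo: true}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	addrs, err := goResolver.LookupHost(ctx, "localhost")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("localhost resolves to:", addrs)
}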
@ -1,7 +1,12 @@
#!/usr/bin/env bash
set -e

-if ! docker image inspect emptyfs > /dev/null; then
+function imageNotPresent {
+    local img="$1"
+    ! docker image inspect "$img" > /dev/null 2> /dev/null
+}
+
+if imageNotPresent "emptyfs"; then
    # build a "docker save" tarball for "emptyfs"
    # see https://github.com/docker/docker/pull/5262
    # and also https://github.com/docker/docker/issues/4242
@ -24,3 +29,27 @@ if ! docker image inspect emptyfs > /dev/null; then
    )
    rm -rf "$dir"
fi

# without c8d image store, image id is the config's id
dangling_cfg=0df1207206e5288f4a989a2f13d1f5b3c4e70467702c1d5d21dfc9f002b7bd43
# with c8d image store, image id is the id of manifest/manifest list.
dangling_mfst=16d365089e5c10e1673ee82ab5bba38ade9b763296ad918bd24b42a1156c5456
if imageNotPresent "$dangling_cfg" && imageNotPresent "$dangling_mfst"; then
    dir="$DEST/dangling"
    mkdir -p "$dir"
    (
        cd "$dir"
        printf '{"schemaVersion":2,"manifests":[{"mediaType":"application/vnd.docker.distribution.manifest.v2+json","digest":"sha256:16d365089e5c10e1673ee82ab5bba38ade9b763296ad918bd24b42a1156c5456","size":264,"annotations":{"org.opencontainers.image.created":"2023-05-19T08:00:44Z"},"platform":{"architecture":"amd64","os":"linux"}}]}' > index.json
        printf '[{"Config":"blobs/sha256/0df1207206e5288f4a989a2f13d1f5b3c4e70467702c1d5d21dfc9f002b7bd43","RepoTags":null,"Layers":null}]' > manifest.json
        mkdir -p blobs/sha256
        printf '{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json","config":{"mediaType":"application/vnd.docker.container.image.v1+json","digest":"sha256:0df1207206e5288f4a989a2f13d1f5b3c4e70467702c1d5d21dfc9f002b7bd43","size":390},"layers":[]}' > blobs/sha256/$dangling_mfst
        printf '{"architecture":"amd64","config":{"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"WorkingDir":"/","Labels":{"org.mobyproject.test.specialimage":"1"},"OnBuild":null},"created":null,"history":[{"created_by":"LABEL org.mobyproject.test.specialimage=1","comment":"buildkit.dockerfile.v0","empty_layer":true}],"os":"linux","rootfs":{"type":"layers","diff_ids":null}}' > blobs/sha256/$dangling_cfg
        tar -cf layer.tar --files-from /dev/null
    )
    (
        [ -n "$TESTDEBUG" ] && set -x
        tar -cC "$dir" . | docker load
    )
    rm -rf "$dir"

fi
@ -3,5 +3,5 @@ set -e

source "$MAKEDIR/.detect-daemon-osarch"
if [ "$DOCKER_ENGINE_GOOS" != "windows" ]; then
-    bundle .ensure-emptyfs
+    bundle .build-empty-images
fi
@ -81,5 +81,5 @@ set_platform_timeout() {
    fi
}

-sh /scripts/ensure-emptyfs.sh
+sh /scripts/build-empty-images.sh
run_test_integration
@ -18,7 +18,6 @@ import (
	"github.com/docker/docker/integration-cli/daemon"
	"github.com/docker/docker/integration-cli/environment"
	"github.com/docker/docker/internal/test/suite"
	"github.com/docker/docker/pkg/reexec"
	testdaemon "github.com/docker/docker/testutil/daemon"
	ienv "github.com/docker/docker/testutil/environment"
	"github.com/docker/docker/testutil/fakestorage"
@ -50,8 +49,6 @@ var (
func init() {
	var err error

	reexec.Init() // This is required for external graphdriver tests

	testEnv, err = environment.New()
	if err != nil {
		panic(err)
41 integration/image/inspect_test.go Normal file
@ -0,0 +1,41 @@
package image

import (
	"context"
	"encoding/json"
	"testing"

	"github.com/docker/docker/testutil/environment"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/skip"
)

// Regression test for: https://github.com/moby/moby/issues/45556
func TestImageInspectEmptyTagsAndDigests(t *testing.T) {
	skip.If(t, testEnv.OSType == "windows", "build-empty-images is not called on Windows")
	defer setupTest(t)()

	client := testEnv.APIClient()
	ctx := context.Background()

	danglingId := environment.DanglingImageIdGraphDriver
	if testEnv.UsingSnapshotter() {
		danglingId = environment.DanglingImageIdSnapshotter
	}

	inspect, raw, err := client.ImageInspectWithRaw(ctx, danglingId)
	assert.NilError(t, err)

	// Must be a zero length array, not null.
	assert.Check(t, is.Len(inspect.RepoTags, 0))
	assert.Check(t, is.Len(inspect.RepoDigests, 0))

	var rawJson map[string]interface{}
	err = json.Unmarshal(raw, &rawJson)
	assert.NilError(t, err)

	// Check if the raw json is also an array, not null.
	assert.Check(t, is.Len(rawJson["RepoTags"], 0))
	assert.Check(t, is.Len(rawJson["RepoDigests"], 0))
}
@ -5,16 +5,12 @@ import (
	"os"
	"testing"

	"github.com/docker/docker/pkg/reexec"
	"github.com/docker/docker/testutil/environment"
)

var testEnv *environment.Execution

func TestMain(m *testing.M) {
	if reexec.Init() {
		return
	}
	var err error
	testEnv, err = environment.New()
	if err != nil {
@ -5,7 +5,6 @@ import (
	"os"
	"testing"

	"github.com/docker/docker/pkg/reexec"
	"github.com/docker/docker/testutil/environment"
)
@ -13,10 +12,6 @@ var (
	testEnv *environment.Execution
)

func init() {
	reexec.Init() // This is required for external graphdriver tests
}

func TestMain(m *testing.M) {
	var err error
	testEnv, err = environment.New()
@ -60,6 +60,10 @@ type container struct {
type task struct {
	containerd.Task
	ctr *container

	// Workaround for https://github.com/containerd/containerd/issues/8557.
	// See also https://github.com/moby/moby/issues/45595.
	serializeExecStartsWorkaround sync.Mutex
}

type process struct {
@ -296,7 +300,12 @@ func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process,
	// the stdin of exec process will be created after p.Start in containerd
	defer func() { stdinCloseSync <- p }()

-	if err = p.Start(ctx); err != nil {
+	err = func() error {
+		t.serializeExecStartsWorkaround.Lock()
+		defer t.serializeExecStartsWorkaround.Unlock()
+		return p.Start(ctx)
+	}()
+	if err != nil {
		// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
		// older containerd-shim
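The workaround funnels every exec start on a task through one mutex, so starts can no longer race each other inside containerd. A standalone sketch of the same serialization pattern (the names and print statements are illustrative, not taken from the change):

package main

import (
	"fmt"
	"sync"
)

// starter serializes calls to start() so that only one start is in flight
// at a time, mirroring the mutex-wrapped p.Start(ctx) call above.
type starter struct {
	mu sync.Mutex
}

func (s *starter) start(id int) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	// The real code calls p.Start(ctx) here; this stand-in just reports.
	fmt.Println("starting exec", id)
	return nil
}

func main() {
	s := &starter{}
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			_ = s.start(i) // starts run one at a time, never concurrently
		}(i)
	}
	wg.Wait()
}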
@ -8,14 +8,9 @@ import (
	"github.com/docker/docker/libnetwork/config"
	"github.com/docker/docker/libnetwork/netlabel"
	"github.com/docker/docker/libnetwork/options"
	"github.com/docker/docker/pkg/reexec"
)

func main() {
	if reexec.Init() {
		return
	}

	// Select and configure the network driver
	networkType := "bridge"
@ -4,23 +4,14 @@
package bridge

import (
	"os"
	"testing"

	"github.com/docker/docker/libnetwork/netlabel"
	"github.com/docker/docker/libnetwork/ns"
	"github.com/docker/docker/libnetwork/testutils"
	"github.com/docker/docker/libnetwork/types"
	"github.com/docker/docker/pkg/reexec"
)

func TestMain(m *testing.M) {
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}

func TestPortMappingConfig(t *testing.T) {
	defer testutils.SetupTestOSContext(t)()
	d := newDriver()
@ -10,7 +10,6 @@ import (
	"net/http/httptest"
	"os"
	"path/filepath"
	"runtime"
	"testing"

	"github.com/docker/docker/libnetwork"
@ -23,19 +22,9 @@ import (
	"github.com/docker/docker/libnetwork/testutils"
	"github.com/docker/docker/libnetwork/types"
	"github.com/docker/docker/pkg/plugins"
	"github.com/docker/docker/pkg/reexec"
	"github.com/sirupsen/logrus"
)

func TestMain(m *testing.M) {
	if runtime.GOOS == "windows" {
		logrus.Info("Test suite does not currently support windows")
		os.Exit(0)
	}
	if reexec.Init() {
		return
	}

	// Cleanup local datastore file
	_ = os.Remove(datastore.DefaultScope("").Client.Address)
@ -600,24 +600,29 @@ func (n *networkNamespace) checkLoV6() {
}

func setIPv6(nspath, iface string, enable bool) error {
-	origNS, err := netns.Get()
-	if err != nil {
-		return fmt.Errorf("failed to get current network namespace: %w", err)
-	}
-	defer origNS.Close()
-
-	namespace, err := netns.GetFromPath(nspath)
-	if err != nil {
-		return fmt.Errorf("failed get network namespace %q: %w", nspath, err)
-	}
-	defer namespace.Close()
-
+	errCh := make(chan error, 1)
+	go func() {
+		defer close(errCh)
+
+		namespace, err := netns.GetFromPath(nspath)
+		if err != nil {
+			errCh <- fmt.Errorf("failed get network namespace %q: %w", nspath, err)
+			return
+		}
+		defer namespace.Close()
+
+		runtime.LockOSThread()
+
+		origNS, err := netns.Get()
+		if err != nil {
+			runtime.UnlockOSThread()
+			errCh <- fmt.Errorf("failed to get current network namespace: %w", err)
+			return
+		}
+		defer origNS.Close()
+
+		if err = netns.Set(namespace); err != nil {
+			runtime.UnlockOSThread()
+			errCh <- fmt.Errorf("setting into container netns %q failed: %w", nspath, err)
+			return
+		}
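The rewritten setIPv6 does the namespace switch inside a dedicated goroutine with its OS thread locked, so the caller's goroutine is never left on a thread that points at the wrong network namespace. A stripped-down sketch of that pattern without the netns details (the real code is more careful about when it unlocks, so that a thread it could not restore is discarded rather than reused):

package main

import (
	"fmt"
	"runtime"
)

// inLockedThread runs fn on its own goroutine with the OS thread locked
// for the duration of fn, then reports fn's error back to the caller.
func inLockedThread(fn func() error) error {
	errCh := make(chan error, 1)
	go func() {
		defer close(errCh)
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
		errCh <- fn()
	}()
	return <-errCh
}

func main() {
	err := inLockedThread(func() error {
		// The real code enters the container's network namespace here.
		fmt.Println("running with a locked OS thread")
		return nil
	})
	fmt.Println("err:", err)
}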
@ -15,7 +15,6 @@ import (
	"github.com/docker/docker/libnetwork/ns"
	"github.com/docker/docker/libnetwork/testutils"
	"github.com/docker/docker/libnetwork/types"
	"github.com/docker/docker/pkg/reexec"
	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netlink/nl"
	"github.com/vishvananda/netns"
@ -382,13 +381,6 @@ func TestLiveRestore(t *testing.T) {
	}
}

func TestMain(m *testing.M) {
	if reexec.Init() {
		return
	}
	os.Exit(m.Run())
}

func TestSandboxCreate(t *testing.T) {
	defer testutils.SetupTestOSContext(t)()
@ -71,6 +71,7 @@ type Resolver struct {
	listenAddress string
	proxyDNS      bool
	startCh       chan struct{}
	logger        *logrus.Logger

	fwdSem      *semaphore.Weighted // Limit the number of concurrent external DNS requests in-flight
	logInverval rate.Sometimes      // Rate-limit logging about hitting the fwdSem limit
@ -89,6 +90,13 @@ func NewResolver(address string, proxyDNS bool, backend DNSBackend) *Resolver {
	}
}

func (r *Resolver) log() *logrus.Logger {
	if r.logger == nil {
		return logrus.StandardLogger()
	}
	return r.logger
}

// SetupFunc returns the setup function that should be run in the container's
// network namespace.
func (r *Resolver) SetupFunc(port int) func() {
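The new log() accessor gives each Resolver an optional private logger with the process-wide logger as a fallback, which is what lets the tests below inject a per-test logger. A small standalone sketch of that accessor pattern (the component type is invented for the example):

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// component follows the same pattern as Resolver.log(): an optional
// per-instance logger, with the standard logger as the fallback.
type component struct {
	logger *logrus.Logger
}

func (c *component) log() *logrus.Logger {
	if c.logger == nil {
		return logrus.StandardLogger()
	}
	return c.logger
}

func main() {
	// Default: falls back to the process-wide standard logger.
	(&component{}).log().Info("using the standard logger")

	// Injected: a caller (for example a test) can capture output.
	custom := logrus.New()
	custom.SetLevel(logrus.DebugLevel)
	c := &component{logger: custom}
	c.log().Debug("using the injected logger")
	fmt.Println("done")
}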
@ -140,7 +148,7 @@ func (r *Resolver) Start() error {
	r.server = s
	go func() {
		if err := s.ActivateAndServe(); err != nil {
-			logrus.WithError(err).Error("[resolver] failed to start PacketConn DNS server")
+			r.log().WithError(err).Error("[resolver] failed to start PacketConn DNS server")
		}
	}()
@ -148,7 +156,7 @@ func (r *Resolver) Start() error {
	r.tcpServer = tcpServer
	go func() {
		if err := tcpServer.ActivateAndServe(); err != nil {
-			logrus.WithError(err).Error("[resolver] failed to start TCP DNS server")
+			r.log().WithError(err).Error("[resolver] failed to start TCP DNS server")
		}
	}()
	return nil
@ -249,7 +257,7 @@ func (r *Resolver) handleIPQuery(query *dns.Msg, ipType int) (*dns.Msg, error) {
	if addr == nil && ipv6Miss {
		// Send a reply without any Answer sections
-		logrus.Debugf("[resolver] lookup name %s present without IPv6 address", name)
+		r.log().Debugf("[resolver] lookup name %s present without IPv6 address", name)
		resp := createRespMsg(query)
		return resp, nil
	}
@ -257,7 +265,7 @@ func (r *Resolver) handleIPQuery(query *dns.Msg, ipType int) (*dns.Msg, error) {
		return nil, nil
	}

-	logrus.Debugf("[resolver] lookup for %s: IP %v", name, addr)
+	r.log().Debugf("[resolver] lookup for %s: IP %v", name, addr)

	resp := createRespMsg(query)
	if len(addr) > 1 {
@ -298,7 +306,7 @@ func (r *Resolver) handlePTRQuery(query *dns.Msg) (*dns.Msg, error) {
		return nil, nil
	}

-	logrus.Debugf("[resolver] lookup for IP %s: name %s", name, host)
+	r.log().Debugf("[resolver] lookup for IP %s: name %s", name, host)
	fqdn := dns.Fqdn(host)

	resp := new(dns.Msg)
@ -365,17 +373,17 @@ func (r *Resolver) serveDNS(w dns.ResponseWriter, query *dns.Msg) {
	case dns.TypeSRV:
		resp, err = r.handleSRVQuery(query)
	default:
-		logrus.Debugf("[resolver] query type %s is not supported by the embedded DNS and will be forwarded to external DNS", dns.TypeToString[queryType])
+		r.log().Debugf("[resolver] query type %s is not supported by the embedded DNS and will be forwarded to external DNS", dns.TypeToString[queryType])
	}

	reply := func(msg *dns.Msg) {
		if err = w.WriteMsg(msg); err != nil {
-			logrus.WithError(err).Errorf("[resolver] failed to write response")
+			r.log().WithError(err).Errorf("[resolver] failed to write response")
		}
	}

	if err != nil {
-		logrus.WithError(err).Errorf("[resolver] failed to handle query: %s (%s)", queryName, dns.TypeToString[queryType])
+		r.log().WithError(err).Errorf("[resolver] failed to handle query: %s (%s)", queryName, dns.TypeToString[queryType])
		reply(new(dns.Msg).SetRcode(query, dns.RcodeServerFailure))
		return
	}
@ -449,7 +457,6 @@ func (r *Resolver) dialExtDNS(proto string, server extDNSEntry) (net.Conn, error
}

func (r *Resolver) forwardExtDNS(proto string, query *dns.Msg) *dns.Msg {
	queryName, queryType := query.Question[0].Name, query.Question[0].Qtype
	for _, extDNS := range r.extDNSList {
		if extDNS.IPStr == "" {
			break
@ -461,7 +468,7 @@ func (r *Resolver) forwardExtDNS(proto string, query *dns.Msg) *dns.Msg {
		cancel()
		if err != nil {
			r.logInverval.Do(func() {
-				logrus.Errorf("[resolver] more than %v concurrent queries", maxConcurrent)
+				r.log().Errorf("[resolver] more than %v concurrent queries", maxConcurrent)
			})
			return new(dns.Msg).SetRcode(query, dns.RcodeRefused)
		}
@ -477,20 +484,7 @@ func (r *Resolver) forwardExtDNS(proto string, query *dns.Msg) *dns.Msg {
		case dns.RcodeServerFailure, dns.RcodeRefused:
			// Server returned FAILURE: continue with the next external DNS server
			// Server returned REFUSED: this can be a transitional status, so continue with the next external DNS server
			logrus.Debugf("[resolver] external DNS %s:%s responded with %s for %q", proto, extDNS.IPStr, statusString(resp.Rcode), queryName)
			continue
		case dns.RcodeNameError:
			// Server returned NXDOMAIN. Stop resolution if it's an authoritative answer (see RFC 8020: https://tools.ietf.org/html/rfc8020#section-2)
			logrus.Debugf("[resolver] external DNS %s:%s responded with %s for %q", proto, extDNS.IPStr, statusString(resp.Rcode), queryName)
			if resp.Authoritative {
				break
			}
			continue
		case dns.RcodeSuccess:
			// All is well
		default:
			// Server gave some error. Log the error, and continue with the next external DNS server
			logrus.Debugf("[resolver] external DNS %s:%s responded with %s (code %d) for %q", proto, extDNS.IPStr, statusString(resp.Rcode), resp.Rcode, queryName)
			r.log().Debugf("[resolver] external DNS %s:%s returned failure:\n%s", proto, extDNS.IPStr, resp)
			continue
		}
		answers := 0
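The forwarding loop keys its retry behaviour off the DNS response code. For reference, a tiny standalone illustration of the relevant miekg/dns response codes and their textual forms; the comments describe standard DNS semantics, not the exact branching of the code above:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	for _, rcode := range []int{
		dns.RcodeSuccess,       // NOERROR: the server answered (possibly with no records)
		dns.RcodeNameError,     // NXDOMAIN: the queried name does not exist
		dns.RcodeServerFailure, // SERVFAIL: the server could not process the query
		dns.RcodeRefused,       // REFUSED: the server declined to answer
	} {
		fmt.Printf("%d -> %s\n", rcode, dns.RcodeToString[rcode])
	}
}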
@ -500,17 +494,17 @@ func (r *Resolver) forwardExtDNS(proto string, query *dns.Msg) *dns.Msg {
		case dns.TypeA:
			answers++
			ip := rr.(*dns.A).A
-			logrus.Debugf("[resolver] received A record %q for %q from %s:%s", ip, h.Name, proto, extDNS.IPStr)
+			r.log().Debugf("[resolver] received A record %q for %q from %s:%s", ip, h.Name, proto, extDNS.IPStr)
			r.backend.HandleQueryResp(h.Name, ip)
		case dns.TypeAAAA:
			answers++
			ip := rr.(*dns.AAAA).AAAA
-			logrus.Debugf("[resolver] received AAAA record %q for %q from %s:%s", ip, h.Name, proto, extDNS.IPStr)
+			r.log().Debugf("[resolver] received AAAA record %q for %q from %s:%s", ip, h.Name, proto, extDNS.IPStr)
			r.backend.HandleQueryResp(h.Name, ip)
		}
	}
-	if resp.Answer == nil || answers == 0 {
-		logrus.Debugf("[resolver] external DNS %s:%s did not return any %s records for %q", proto, extDNS.IPStr, dns.TypeToString[queryType], queryName)
+	if len(resp.Answer) == 0 {
+		r.log().Debugf("[resolver] external DNS %s:%s returned response with no answers:\n%s", proto, extDNS.IPStr, resp)
	}
	resp.Compress = true
	return resp
@ -522,12 +516,12 @@ func (r *Resolver) forwardExtDNS(proto string, query *dns.Msg) *dns.Msg {
func (r *Resolver) exchange(proto string, extDNS extDNSEntry, query *dns.Msg) *dns.Msg {
	extConn, err := r.dialExtDNS(proto, extDNS)
	if err != nil {
-		logrus.WithError(err).Warn("[resolver] connect failed")
+		r.log().WithError(err).Warn("[resolver] connect failed")
		return nil
	}
	defer extConn.Close()

-	log := logrus.WithFields(logrus.Fields{
+	log := r.log().WithFields(logrus.Fields{
		"dns-server":  extConn.RemoteAddr().Network() + ":" + extConn.RemoteAddr().String(),
		"client-addr": extConn.LocalAddr().Network() + ":" + extConn.LocalAddr().String(),
		"question":    query.Question[0].String(),
@ -548,7 +542,7 @@ func (r *Resolver) exchange(proto string, extDNS extDNSEntry, query *dns.Msg) *d
		UDPSize: dns.MaxMsgSize,
	}).ExchangeWithConn(query, &dns.Conn{Conn: extConn})
	if err != nil {
-		logrus.WithError(err).Errorf("[resolver] failed to query DNS server: %s, query: %s", extConn.RemoteAddr().String(), query.Question[0].String())
+		r.log().WithError(err).Errorf("[resolver] failed to query DNS server: %s, query: %s", extConn.RemoteAddr().String(), query.Question[0].String())
		return nil
	}
@ -558,10 +552,3 @@ func (r *Resolver) exchange(proto string, extDNS extDNSEntry, query *dns.Msg) *d
	}
	return resp
}

func statusString(responseCode int) string {
	if s, ok := dns.RcodeToString[responseCode]; ok {
		return s
	}
	return "UNKNOWN"
}
@ -13,21 +13,28 @@ import (
	"github.com/miekg/dns"
	"github.com/sirupsen/logrus"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/skip"
)

// a simple/null address type that will be used to fake a local address for unit testing
type tstaddr struct {
	network string
}

-func (a *tstaddr) Network() string { return "tcp" }
+func (a *tstaddr) Network() string {
+	if a.network != "" {
+		return a.network
+	}
+	return "tcp"
+}

-func (a *tstaddr) String() string { return "127.0.0.1" }
+func (a *tstaddr) String() string { return "(fake)" }

// a simple writer that implements dns.ResponseWriter for unit testing purposes
type tstwriter struct {
-	localAddr net.Addr
-	msg       *dns.Msg
+	network string
+	msg     *dns.Msg
}
@ -38,13 +45,12 @@ func (w *tstwriter) WriteMsg(m *dns.Msg) (err error) {
func (w *tstwriter) Write(m []byte) (int, error) { return 0, nil }

func (w *tstwriter) LocalAddr() net.Addr {
-	if w.localAddr != nil {
-		return w.localAddr
-	}
-	return new(tstaddr)
+	return &tstaddr{network: w.network}
}

-func (w *tstwriter) RemoteAddr() net.Addr { return new(tstaddr) }
+func (w *tstwriter) RemoteAddr() net.Addr {
+	return &tstaddr{network: w.network}
+}

func (w *tstwriter) TsigStatus() error { return nil }
@ -371,15 +377,14 @@ func TestOversizedDNSReply(t *testing.T) {
	srvAddr := srv.LocalAddr().(*net.UDPAddr)
	rsv := NewResolver("", true, noopDNSBackend{})
+	// The resolver logs lots of valuable info at level debug. Redirect it
+	// to t.Log() so the log spew is emitted only if the test fails.
+	rsv.logger = testLogger(t)
	rsv.SetExtServers([]extDNSEntry{
		{IPStr: srvAddr.IP.String(), port: uint16(srvAddr.Port), HostLoopback: true},
	})

-	// The resolver logs lots of valuable info at level debug. Redirect it
-	// to t.Log() so the log spew is emitted only if the test fails.
-	defer redirectLogrusTo(t)()
-
-	w := &tstwriter{localAddr: srv.LocalAddr()}
+	w := &tstwriter{network: srvAddr.Network()}
	q := new(dns.Msg).SetQuestion("s3.amazonaws.com.", dns.TypeA)
	rsv.serveDNS(w, q)
	resp := w.GetResponse()
@ -390,14 +395,11 @@ func TestOversizedDNSReply(t *testing.T) {
	checkDNSRRType(t, resp.Answer[0].Header().Rrtype, dns.TypeA)
}

-func redirectLogrusTo(t *testing.T) func() {
-	oldLevel, oldOut := logrus.StandardLogger().Level, logrus.StandardLogger().Out
-	logrus.StandardLogger().SetLevel(logrus.DebugLevel)
-	logrus.SetOutput(tlogWriter{t})
-	return func() {
-		logrus.StandardLogger().SetLevel(oldLevel)
-		logrus.StandardLogger().SetOutput(oldOut)
-	}
+func testLogger(t *testing.T) *logrus.Logger {
+	logger := logrus.New()
+	logger.SetLevel(logrus.DebugLevel)
+	logger.SetOutput(tlogWriter{t})
+	return logger
}

type tlogWriter struct{ t *testing.T }
@ -439,9 +441,8 @@ func TestReplySERVFAIL(t *testing.T) {
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
-			defer redirectLogrusTo(t)
-
			rsv := NewResolver("", tt.proxyDNS, badSRVDNSBackend{})
+			rsv.logger = testLogger(t)
			w := &tstwriter{}
			rsv.serveDNS(w, tt.q)
			resp := w.GetResponse()
@ -457,3 +458,67 @@ type badSRVDNSBackend struct{ noopDNSBackend }
func (badSRVDNSBackend) ResolveService(name string) ([]*net.SRV, []net.IP) {
	return []*net.SRV{nil, nil, nil}, nil // Mismatched slice lengths
}

func TestProxyNXDOMAIN(t *testing.T) {
	mockSOA, err := dns.NewRR(". 86367 IN SOA a.root-servers.net. nstld.verisign-grs.com. 2023051800 1800 900 604800 86400\n")
	assert.NilError(t, err)
	assert.Assert(t, mockSOA != nil)

	serveStarted := make(chan struct{})
	srv := &dns.Server{
		Net:  "udp",
		Addr: "127.0.0.1:0",
		Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
			msg := new(dns.Msg).SetRcode(r, dns.RcodeNameError)
			msg.Ns = append(msg.Ns, dns.Copy(mockSOA))
			w.WriteMsg(msg)
		}),
		NotifyStartedFunc: func() { close(serveStarted) },
	}
	serveDone := make(chan error, 1)
	go func() {
		defer close(serveDone)
		serveDone <- srv.ListenAndServe()
	}()

	select {
	case err := <-serveDone:
		t.Fatal(err)
	case <-serveStarted:
	}

	defer func() {
		if err := srv.Shutdown(); err != nil {
			t.Error(err)
		}
		<-serveDone
	}()

	// This test, by virtue of running a server and client in different
	// not-locked-to-thread goroutines, happens to be a good canary for
	// whether we are leaking unlocked OS threads set to the wrong network
	// namespace. Make a best-effort attempt to detect that situation so we
	// are not left chasing ghosts next time.
	testutils.AssertSocketSameNetNS(t, srv.PacketConn.(*net.UDPConn))

	srvAddr := srv.PacketConn.LocalAddr().(*net.UDPAddr)
	rsv := NewResolver("", true, noopDNSBackend{})
	rsv.SetExtServers([]extDNSEntry{
		{IPStr: srvAddr.IP.String(), port: uint16(srvAddr.Port), HostLoopback: true},
	})

	// The resolver logs lots of valuable info at level debug. Redirect it
	// to t.Log() so the log spew is emitted only if the test fails.
	rsv.logger = testLogger(t)

	w := &tstwriter{network: srvAddr.Network()}
	q := new(dns.Msg).SetQuestion("example.net.", dns.TypeA)
	rsv.serveDNS(w, q)
	resp := w.GetResponse()
	checkNonNullResponse(t, resp)
	t.Log("Response:\n" + resp.String())
	checkDNSResponseCode(t, resp, dns.RcodeNameError)
	assert.Assert(t, is.Len(resp.Answer, 0))
	assert.Assert(t, is.Len(resp.Ns, 1))
	assert.Equal(t, resp.Ns[0].String(), mockSOA.String())
}
43 libnetwork/testutils/sanity_linux.go Normal file
@ -0,0 +1,43 @@
package testutils

import (
	"errors"
	"syscall"
	"testing"

	"github.com/vishvananda/netns"
	"golang.org/x/sys/unix"
	"gotest.tools/v3/assert"
)

// AssertSocketSameNetNS makes a best-effort attempt to assert that conn is in
// the same network namespace as the current goroutine's thread.
func AssertSocketSameNetNS(t testing.TB, conn syscall.Conn) {
	t.Helper()

	sc, err := conn.SyscallConn()
	assert.NilError(t, err)
	sc.Control(func(fd uintptr) {
		srvnsfd, err := unix.IoctlRetInt(int(fd), unix.SIOCGSKNS)
		if err != nil {
			if errors.Is(err, unix.EPERM) {
				t.Log("Cannot determine socket's network namespace. Do we have CAP_NET_ADMIN?")
				return
			}
			if errors.Is(err, unix.ENOSYS) {
				t.Log("Cannot query socket's network namespace due to missing kernel support.")
				return
			}
			t.Fatal(err)
		}
		srvns := netns.NsHandle(srvnsfd)
		defer srvns.Close()

		curns, err := netns.Get()
		assert.NilError(t, err)
		defer curns.Close()
		if !srvns.Equal(curns) {
			t.Fatalf("Socket is in network namespace %s, but test goroutine is in %s", srvns, curns)
		}
	})
}
11 libnetwork/testutils/sanity_notlinux.go Normal file
@ -0,0 +1,11 @@
//go:build !linux

package testutils

import (
	"syscall"
	"testing"
)

// AssertSocketSameNetNS is a no-op on platforms other than Linux.
func AssertSocketSameNetNS(t testing.TB, conn syscall.Conn) {}
@ -13,6 +13,8 @@ const (
)

// GetVersion returns the major and minor version of apparmor_parser.
//
// Deprecated: no longer used, and will be removed in the next release.
func GetVersion() (int, error) {
	output, err := cmd("", "--version")
	if err != nil {
@ -14,14 +14,9 @@ import (
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/reexec"
	"gotest.tools/v3/skip"
)

func init() {
	reexec.Init()
}

var chrootArchiver = NewArchiver(idtools.IdentityMapping{})

func TarUntar(src, dst string) error {
@ -14,10 +14,8 @@ import (
	"github.com/docker/docker/pkg/aaparser"
)

-var (
-	// profileDirectory is the file store for apparmor profiles and macros.
-	profileDirectory = "/etc/apparmor.d"
-)
+// profileDirectory is the file store for apparmor profiles and macros.
+const profileDirectory = "/etc/apparmor.d"

// profileData holds information about the given profile for generation.
type profileData struct {
@ -29,8 +27,6 @@ type profileData struct {
	Imports []string
	// InnerImports defines the apparmor functions to import in the profile.
	InnerImports []string
	// Version is the {major, minor, patch} version of apparmor_parser as a single number.
	Version int
}

// generateDefault creates an apparmor profile from ProfileData.
@ -50,12 +46,6 @@ func (p *profileData) generateDefault(out io.Writer) error {
		p.InnerImports = append(p.InnerImports, "#include <abstractions/base>")
	}

	ver, err := aaparser.GetVersion()
	if err != nil {
		return err
	}
	p.Version = ver

	return compiled.Execute(out, p)
}
@ -77,10 +77,7 @@ func decodeContainerConfig(src io.Reader, si *sysinfo.SysInfo) (*container.Confi
func loadJSON(src io.Reader, out interface{}) error {
	dec := json.NewDecoder(src)
	if err := dec.Decode(&out); err != nil {
-		if err == io.EOF {
-			return validationError("invalid JSON: got EOF while reading request body")
-		}
-		return validationError("invalid JSON: " + err.Error())
+		return invalidJSONError{Err: err}
	}
	if dec.More() {
		return validationError("unexpected content after JSON")
@ -40,3 +40,17 @@ func (e validationError) Error() string {
}

func (e validationError) InvalidParameter() {}

type invalidJSONError struct {
	Err error
}

func (e invalidJSONError) Error() string {
	return "invalid JSON: " + e.Err.Error()
}

func (e invalidJSONError) Unwrap() error {
	return e.Err
}

func (e invalidJSONError) InvalidParameter() {}
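Because invalidJSONError keeps the underlying decode error and exposes Unwrap, callers can still match on the original cause with the standard errors helpers. A small self-contained illustration (the error type below is a local copy made for the example, not an import of the daemon package):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
)

// invalidJSONError mirrors the wrapper above: it keeps the decoder's error
// and exposes it through Unwrap so errors.Is / errors.As keep working.
type invalidJSONError struct{ Err error }

func (e invalidJSONError) Error() string { return "invalid JSON: " + e.Err.Error() }
func (e invalidJSONError) Unwrap() error { return e.Err }

func main() {
	dec := json.NewDecoder(strings.NewReader("")) // empty request body
	var out map[string]interface{}

	err := dec.Decode(&out)
	if err != nil {
		err = invalidJSONError{Err: err}
	}

	fmt.Println(err)                    // invalid JSON: EOF
	fmt.Println(errors.Is(err, io.EOF)) // true – the original cause is still visible
}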
@ -98,6 +98,9 @@ func deleteAllImages(t testing.TB, apiclient client.ImageAPIClient, protectedIma
	ctx := context.Background()
	for _, image := range images {
		tags := tagsFromImageSummary(image)
		if _, ok := protectedImages[image.ID]; ok {
			continue
		}
		if len(tags) == 0 {
			removeImage(ctx, t, apiclient, image.ID)
			continue
@ -193,6 +193,13 @@ func (e *Execution) IsUserNamespaceInKernel() bool {
	return true
}

// UsingSnapshotter returns whether containerd snapshotters are used for the
// tests by checking if the "TEST_INTEGRATION_USE_SNAPSHOTTER" is set to a
// non-empty value.
func (e *Execution) UsingSnapshotter() bool {
	return os.Getenv("TEST_INTEGRATION_USE_SNAPSHOTTER") != ""
}

// HasExistingImage checks whether there is an image with the given reference.
// Note that this is done by filtering and then checking whether there were any
// results -- so ambiguous references might result in false-positives.
@ -95,6 +95,7 @@ func ProtectImages(t testing.TB, testEnv *Execution) {
		images = append(images, frozenImages...)
	}
	testEnv.ProtectImage(t, images...)
	testEnv.ProtectImage(t, DanglingImageIdGraphDriver, DanglingImageIdSnapshotter)
}

func getExistingImages(t testing.TB, testEnv *Execution) []string {
7 testutil/environment/special_images.go Normal file
@ -0,0 +1,7 @@
package environment

// Graph driver image store identifies images by the ID of their config.
const DanglingImageIdGraphDriver = "sha256:0df1207206e5288f4a989a2f13d1f5b3c4e70467702c1d5d21dfc9f002b7bd43"

// The containerd image store identifies images by the ID of their manifest/manifest list.
const DanglingImageIdSnapshotter = "sha256:16d365089e5c10e1673ee82ab5bba38ade9b763296ad918bd24b42a1156c5456"
@ -32,7 +32,7 @@ require (
	github.com/coreos/go-systemd/v22 v22.5.0
	github.com/creack/pty v1.1.18
	github.com/deckarep/golang-set/v2 v2.3.0
-	github.com/docker/distribution v2.8.1+incompatible
+	github.com/docker/distribution v2.8.2+incompatible
	github.com/docker/go-connections v0.4.0
	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c
	github.com/docker/go-metrics v0.0.1
@ -56,7 +56,7 @@ require (
	github.com/klauspost/compress v1.16.3
	github.com/miekg/dns v1.1.43
	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
-	github.com/moby/buildkit v0.11.6 // IMPORTANT: when updating, also update the version in builder/builder-next/worker/worker.go
+	github.com/moby/buildkit v0.11.7-0.20230525183624-798ad6b0ce9f // IMPORTANT: when updating, also update the version in builder/builder-next/worker/worker.go
	github.com/moby/ipvs v1.1.0
	github.com/moby/locker v1.0.1
	github.com/moby/patternmatcher v0.5.0
@ -118,7 +118,7 @@ require (
	github.com/containerd/cgroups v1.0.4 // indirect
	github.com/containerd/console v1.0.3 // indirect
	github.com/containerd/go-cni v1.1.6 // indirect
-	github.com/containerd/go-runc v1.0.0 // indirect
+	github.com/containerd/go-runc v1.1.0 // indirect
	github.com/containerd/nydus-snapshotter v0.3.1 // indirect
	github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
	github.com/containerd/ttrpc v1.1.1 // indirect
@ -152,8 +152,6 @@ require (
	github.com/inconshreveable/mousetrap v1.0.1 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
	github.com/onsi/ginkgo/v2 v2.1.4 // indirect
	github.com/onsi/gomega v1.20.1 // indirect
	github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 // indirect
	github.com/philhofer/fwd v1.1.2 // indirect
	github.com/prometheus/client_model v0.3.0 // indirect
17 vendor.sum
@ -397,8 +397,9 @@ github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gGleA=
github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U=
github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
@ -502,8 +503,8 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT
github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.0.0-20200511152416-a93e9eb0e95c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@ -1041,8 +1042,8 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
github.com/moby/buildkit v0.11.7-0.20230525183624-798ad6b0ce9f h1:9wobL03Y6U8azuDLUqYblbUdVU9jpjqecDdW7w4wZtI=
github.com/moby/buildkit v0.11.7-0.20230525183624-798ad6b0ce9f/go.mod h1:GCqKfHhz+pddzfgaR7WmHVEE3nKKZMMDPpK8mh3ZLv4=
github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ=
github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
@ -1112,9 +1113,8 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY=
github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@ -1125,9 +1125,8 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=

20 vendor/github.com/containerd/go-runc/.golangci.yml generated vendored Normal file
@@ -0,0 +1,20 @@
linters:
  enable:
    - gofmt
    - goimports
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - unconvert
    - unused
    - vet
  disable:
    - errcheck

issues:
  include:
    - EXC0002

run:
  timeout: 2m

21 vendor/github.com/containerd/go-runc/.travis.yml generated vendored
@@ -1,21 +0,0 @@
language: go
go:
  - 1.13.x
  - 1.14.x
  - 1.15.x

install:
  - go get -t ./...
  - go get -u github.com/vbatts/git-validation
  - go get -u github.com/kunalkushwaha/ltag

before_script:
  - pushd ..; git clone https://github.com/containerd/project; popd

script:
  - DCO_VERBOSITY=-q ../project/script/validate/dco
  - ../project/script/validate/fileheader ../project/
  - go test -v -race -covermode=atomic -coverprofile=coverage.txt ./...

after_success:
  - bash <(curl -s https://codecov.io/bash)

10 vendor/github.com/containerd/go-runc/README.md generated vendored
@@ -1,7 +1,7 @@
# go-runc

[](https://travis-ci.org/containerd/go-runc)
[](https://codecov.io/gh/containerd/go-runc)
[](https://github.com/containerd/go-runc/actions?query=workflow%3ACI)
[](https://codecov.io/gh/containerd/go-runc)

This is a package for consuming the [runc](https://github.com/opencontainers/runc) binary in your Go applications.
It tries to expose all the settings and features of the runc CLI. If there is something missing then add it, its opensource!
@@ -18,8 +18,8 @@ Docs can be found at [godoc.org](https://godoc.org/github.com/containerd/go-runc
The go-runc is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:

 * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
 * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
 * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
 * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
 * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
 * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.

2 vendor/github.com/containerd/go-runc/command_other.go generated vendored
@@ -1,4 +1,4 @@
// +build !linux
//go:build !linux

/*
Copyright The containerd Authors.

9 vendor/github.com/containerd/go-runc/console.go generated vendored
@@ -1,4 +1,4 @@
// +build !windows
//go:build !windows

/*
Copyright The containerd Authors.
@@ -20,7 +20,6 @@ package runc

import (
    "fmt"
    "io/ioutil"
    "net"
    "os"
    "path/filepath"
@@ -53,7 +52,7 @@ func NewConsoleSocket(path string) (*Socket, error) {
// On Close(), the socket is deleted
func NewTempConsoleSocket() (*Socket, error) {
    runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
    dir, err := ioutil.TempDir(runtimeDir, "pty")
    dir, err := os.MkdirTemp(runtimeDir, "pty")
    if err != nil {
        return nil, err
    }
@@ -70,7 +69,7 @@ func NewTempConsoleSocket() (*Socket, error) {
        return nil, err
    }
    if runtimeDir != "" {
        if err := os.Chmod(abs, 0755|os.ModeSticky); err != nil {
        if err := os.Chmod(abs, 0o755|os.ModeSticky); err != nil {
            return nil, err
        }
    }
@@ -96,7 +95,7 @@ func (c *Socket) Path() string {
// locally (it is sent as non-auxiliary data in the same payload).
func recvFd(socket *net.UnixConn) (*os.File, error) {
    const MaxNameLen = 4096
    var oobSpace = unix.CmsgSpace(4)
    oobSpace := unix.CmsgSpace(4)

    name := make([]byte, MaxNameLen)
    oob := make([]byte, oobSpace)
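
Reviewer note: the console.go hunks only swap ioutil.TempDir for os.MkdirTemp and move to 0o-style octal literals, so callers are unaffected. For orientation, a minimal hedged sketch of how these helpers are typically used (the printed text and flow here are illustrative, not taken from this diff):

```go
package main

import (
	"fmt"
	"log"

	runc "github.com/containerd/go-runc"
)

func main() {
	// Creates a socket in a temp dir under XDG_RUNTIME_DIR (or the system
	// temp dir when unset); Close removes it again.
	socket, err := runc.NewTempConsoleSocket()
	if err != nil {
		log.Fatal(err)
	}
	defer socket.Close()

	// This path is what ends up behind runc's --console-socket flag, e.g.
	// via CreateOpts.ConsoleSocket. socket.ReceiveMaster() would then block
	// until runc sends the pty master back over the socket.
	fmt.Println("console socket:", socket.Path())
}
```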

17 vendor/github.com/containerd/go-runc/events.go generated vendored
@@ -16,6 +16,7 @@

package runc

// Event is a struct to pass runc event information
type Event struct {
    // Type are the event type generated by runc
    // If the type is "error" then check the Err field on the event for
@@ -27,20 +28,23 @@ type Event struct {
    Err error `json:"-"`
}

// Stats is statistical information from the runc process
type Stats struct {
    Cpu     Cpu                `json:"cpu"`
    Cpu     Cpu                `json:"cpu"` //revive:disable
    Memory  Memory             `json:"memory"`
    Pids    Pids               `json:"pids"`
    Blkio   Blkio              `json:"blkio"`
    Hugetlb map[string]Hugetlb `json:"hugetlb"`
}

// Hugetlb represents the detailed hugetlb component of the statistics data
type Hugetlb struct {
    Usage   uint64 `json:"usage,omitempty"`
    Max     uint64 `json:"max,omitempty"`
    Failcnt uint64 `json:"failcnt"`
}

// BlkioEntry represents a block IO entry in the IO stats
type BlkioEntry struct {
    Major uint64 `json:"major,omitempty"`
    Minor uint64 `json:"minor,omitempty"`
@@ -48,6 +52,7 @@ type BlkioEntry struct {
    Value uint64 `json:"value,omitempty"`
}

// Blkio represents the statistical information from block IO devices
type Blkio struct {
    IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
    IoServicedRecursive     []BlkioEntry `json:"ioServicedRecursive,omitempty"`
@@ -59,17 +64,22 @@ type Blkio struct {
    SectorsRecursive []BlkioEntry `json:"sectorsRecursive,omitempty"`
}

// Pids represents the process ID information
type Pids struct {
    Current uint64 `json:"current,omitempty"`
    Limit   uint64 `json:"limit,omitempty"`
}

// Throttling represents the throttling statistics
type Throttling struct {
    Periods          uint64 `json:"periods,omitempty"`
    ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
    ThrottledTime    uint64 `json:"throttledTime,omitempty"`
}

// CpuUsage represents the CPU usage statistics
//
//revive:disable-next-line
type CpuUsage struct {
    // Units: nanoseconds.
    Total uint64 `json:"total,omitempty"`
@@ -78,11 +88,15 @@ type CpuUsage struct {
    User uint64 `json:"user"`
}

// Cpu represents the CPU usage and throttling statistics
//
//revive:disable-next-line
type Cpu struct {
    Usage      CpuUsage   `json:"usage,omitempty"`
    Throttling Throttling `json:"throttling,omitempty"`
}

// MemoryEntry represents an item in the memory use/statistics
type MemoryEntry struct {
    Limit uint64 `json:"limit"`
    Usage uint64 `json:"usage,omitempty"`
@@ -90,6 +104,7 @@ type MemoryEntry struct {
    Failcnt uint64 `json:"failcnt"`
}

// Memory represents the collection of memory statistics from the process
type Memory struct {
    Cache uint64 `json:"cache,omitempty"`
    Usage MemoryEntry `json:"usage,omitempty"`
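
These events.go hunks only add doc comments and revive directives; the JSON shape is unchanged. Since the Event and Stats types map directly onto what `runc events` prints, here is a small hedged sketch of decoding one such line (the sample payload is made up for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	runc "github.com/containerd/go-runc"
)

func main() {
	// A trimmed-down, hypothetical stats event in the shape `runc events` emits.
	line := []byte(`{"type":"stats","id":"demo","data":{"cpu":{"usage":{"total":123456}},"pids":{"current":3}}}`)

	var ev runc.Event
	if err := json.Unmarshal(line, &ev); err != nil {
		log.Fatal(err)
	}
	// For "stats" events the Data field is decoded into the Stats struct above.
	if ev.Stats != nil {
		fmt.Println(ev.ID, ev.Stats.Pids.Current, ev.Stats.Cpu.Usage.Total)
	}
}
```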

12 vendor/github.com/containerd/go-runc/io.go generated vendored
@@ -22,6 +22,7 @@ import (
    "os/exec"
)

// IO is the terminal IO interface
type IO interface {
    io.Closer
    Stdin() io.WriteCloser
@@ -30,6 +31,7 @@ type IO interface {
    Set(*exec.Cmd)
}

// StartCloser is an interface to handle IO closure after start
type StartCloser interface {
    CloseAfterStart() error
}
@@ -76,6 +78,12 @@ func (p *pipe) Close() error {
    return err
}

// NewPipeIO creates pipe pairs to be used with runc. It is not implemented
// on Windows.
func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) {
    return newPipeIO(uid, gid, opts...)
}

type pipeIO struct {
    in  *pipe
    out *pipe
@@ -144,12 +152,12 @@ func (i *pipeIO) Set(cmd *exec.Cmd) {
    }
}

// NewSTDIO returns I/O setup for standard OS in/out/err usage
func NewSTDIO() (IO, error) {
    return &stdio{}, nil
}

type stdio struct {
}
type stdio struct{}

func (s *stdio) Close() error {
    return nil
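
With this change NewPipeIO becomes a thin wrapper over a platform-specific newPipeIO (which simply errors out on Windows, see io_windows.go below). A rough usage sketch, assuming a Unix host; the wiring shown is illustrative rather than lifted from the diff:

```go
package main

import (
	"io"
	"log"
	"os"

	runc "github.com/containerd/go-runc"
)

func main() {
	// Own the pipe ends as the current user; any uid/gid pair the caller is
	// allowed to chown to would work the same way.
	pio, err := runc.NewPipeIO(os.Getuid(), os.Getgid())
	if err != nil {
		// On Windows this now fails with "not implemented on Windows".
		log.Fatal(err)
	}
	defer pio.Close()

	// pio is handed to runc via CreateOpts{IO: pio}; here we only show that
	// the container's stdout side is an ordinary io.ReadCloser.
	go func() { _, _ = io.Copy(os.Stdout, pio.Stdout()) }()

	// Alternatively, NewSTDIO wires the child directly to this process's
	// stdin/stdout/stderr.
	stdio, err := runc.NewSTDIO()
	if err != nil {
		log.Fatal(err)
	}
	_ = stdio
}
```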

17 vendor/github.com/containerd/go-runc/io_unix.go generated vendored
@@ -1,4 +1,4 @@
// +build !windows
//go:build !windows

/*
Copyright The containerd Authors.
@@ -19,14 +19,15 @@
package runc

import (
    "github.com/pkg/errors"
    "fmt"
    "runtime"

    "github.com/sirupsen/logrus"
    "golang.org/x/sys/unix"
    "runtime"
)

// NewPipeIO creates pipe pairs to be used with runc
func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) {
// newPipeIO creates pipe pairs to be used with runc
func newPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) {
    option := defaultIOOption()
    for _, o := range opts {
        o(option)
@@ -54,7 +55,7 @@ func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) {
        if runtime.GOOS == "darwin" {
            logrus.WithError(err).Debug("failed to chown stdin, ignored")
        } else {
            return nil, errors.Wrap(err, "failed to chown stdin")
            return nil, fmt.Errorf("failed to chown stdin: %w", err)
        }
    }
}
@@ -69,7 +70,7 @@ func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) {
        if runtime.GOOS == "darwin" {
            logrus.WithError(err).Debug("failed to chown stdout, ignored")
        } else {
            return nil, errors.Wrap(err, "failed to chown stdout")
            return nil, fmt.Errorf("failed to chown stdout: %w", err)
        }
    }
}
@@ -84,7 +85,7 @@ func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) {
        if runtime.GOOS == "darwin" {
            logrus.WithError(err).Debug("failed to chown stderr, ignored")
        } else {
            return nil, errors.Wrap(err, "failed to chown stderr")
            return nil, fmt.Errorf("failed to chown stderr: %w", err)
        }
    }
}
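
io_unix.go drops github.com/pkg/errors in favour of fmt.Errorf with %w, which keeps the chown failure inspectable with the standard errors helpers. A small illustrative sketch of the same pattern (the path and function name are hypothetical):

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func chownStdin(path string) error {
	if err := os.Chown(path, 0, 0); err != nil {
		// Equivalent of the old errors.Wrap(err, "failed to chown stdin"):
		// %w keeps the original error in the chain.
		return fmt.Errorf("failed to chown stdin: %w", err)
	}
	return nil
}

func main() {
	err := chownStdin("/nonexistent/stdin")
	fmt.Println(err)
	// The wrapped cause is still reachable via errors.Is / errors.As.
	fmt.Println(errors.Is(err, fs.ErrNotExist))
}
```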

47 vendor/github.com/containerd/go-runc/io_windows.go generated vendored
@@ -1,4 +1,4 @@
// +build windows
//go:build windows

/*
Copyright The containerd Authors.
@@ -18,45 +18,8 @@

package runc

// NewPipeIO creates pipe pairs to be used with runc
func NewPipeIO(opts ...IOOpt) (i IO, err error) {
    option := defaultIOOption()
    for _, o := range opts {
        o(option)
    }
    var (
        pipes                 []*pipe
        stdin, stdout, stderr *pipe
    )
    // cleanup in case of an error
    defer func() {
        if err != nil {
            for _, p := range pipes {
                p.Close()
            }
        }
    }()
    if option.OpenStdin {
        if stdin, err = newPipe(); err != nil {
            return nil, err
        }
        pipes = append(pipes, stdin)
    }
    if option.OpenStdout {
        if stdout, err = newPipe(); err != nil {
            return nil, err
        }
        pipes = append(pipes, stdout)
    }
    if option.OpenStderr {
        if stderr, err = newPipe(); err != nil {
            return nil, err
        }
        pipes = append(pipes, stderr)
    }
    return &pipeIO{
        in:  stdin,
        out: stdout,
        err: stderr,
    }, nil
import "errors"

func newPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) {
    return nil, errors.New("not implemented on Windows")
}

54 vendor/github.com/containerd/go-runc/monitor.go generated vendored
@@ -18,32 +18,37 @@ package runc

import (
    "os/exec"
    "runtime"
    "syscall"
    "time"
)

// Monitor is the default ProcessMonitor for handling runc process exit
var Monitor ProcessMonitor = &defaultMonitor{}

// Exit holds the exit information from a process
type Exit struct {
    Timestamp time.Time
    Pid       int
    Status    int
}

// ProcessMonitor is an interface for process monitoring
// ProcessMonitor is an interface for process monitoring.
//
// It allows daemons using go-runc to have a SIGCHLD handler
// to handle exits without introducing races between the handler
// and go's exec.Cmd
// These methods should match the methods exposed by exec.Cmd to provide
// a consistent experience for the caller
// and go's exec.Cmd.
//
// ProcessMonitor also provides a StartLocked method which is similar to
// Start, but locks the goroutine used to start the process to an OS thread
// (for example: when Pdeathsig is set).
type ProcessMonitor interface {
    Start(*exec.Cmd) (chan Exit, error)
    StartLocked(*exec.Cmd) (chan Exit, error)
    Wait(*exec.Cmd, chan Exit) (int, error)
}

type defaultMonitor struct {
}
type defaultMonitor struct{}

func (m *defaultMonitor) Start(c *exec.Cmd) (chan Exit, error) {
    if err := c.Start(); err != nil {
@@ -70,6 +75,43 @@ func (m *defaultMonitor) Start(c *exec.Cmd) (chan Exit, error) {
    return ec, nil
}

// StartLocked is like Start, but locks the goroutine used to start the process to
// the OS thread for use-cases where the parent thread matters to the child process
// (for example: when Pdeathsig is set).
func (m *defaultMonitor) StartLocked(c *exec.Cmd) (chan Exit, error) {
    started := make(chan error)
    ec := make(chan Exit, 1)
    go func() {
        runtime.LockOSThread()
        defer runtime.UnlockOSThread()

        if err := c.Start(); err != nil {
            started <- err
            return
        }
        close(started)
        var status int
        if err := c.Wait(); err != nil {
            status = 255
            if exitErr, ok := err.(*exec.ExitError); ok {
                if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok {
                    status = ws.ExitStatus()
                }
            }
        }
        ec <- Exit{
            Timestamp: time.Now(),
            Pid:       c.Process.Pid,
            Status:    status,
        }
        close(ec)
    }()
    if err := <-started; err != nil {
        return nil, err
    }
    return ec, nil
}

func (m *defaultMonitor) Wait(c *exec.Cmd, ec chan Exit) (int, error) {
    e := <-ec
    return e.Status, nil
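
StartLocked is the substantive addition here: it behaves like Start but keeps the starting goroutine pinned to its OS thread for the child's lifetime, which matters when SysProcAttr.Pdeathsig is used (the signal fires when the starting thread, not the whole parent, dies). A hedged, Linux-only sketch of driving it directly; the sleep command is purely illustrative:

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
	"syscall"

	runc "github.com/containerd/go-runc"
)

func main() {
	cmd := exec.Command("sleep", "1")
	// Deliver SIGKILL to the child if the thread that started it dies.
	cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGKILL}

	// StartLocked locks the starting goroutine to its OS thread so Pdeathsig
	// is not triggered spuriously by Go's thread recycling.
	exitCh, err := runc.Monitor.StartLocked(cmd)
	if err != nil {
		log.Fatal(err)
	}
	status, err := runc.Monitor.Wait(cmd, exitCh)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("exit status:", status)
}
```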

217 vendor/github.com/containerd/go-runc/runc.go generated vendored
@@ -23,21 +23,22 @@ import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "os/exec"
    "path/filepath"
    "strconv"
    "strings"
    "syscall"
    "time"

    specs "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/opencontainers/runtime-spec/specs-go/features"
)

// Format is the type of log formatting options avaliable
// Format is the type of log formatting options available
type Format string

// TopBody represents the structured data of the full ps output
// TopResults represents the structured data of the full ps output
type TopResults struct {
    // Processes running in the container, where each is process is an array of values corresponding to the headers
    Processes [][]string `json:"Processes"`
@@ -48,15 +49,53 @@ type TopResults struct {

const (
    none Format = ""
    // JSON represents the JSON format
    JSON Format = "json"
    // Text represents plain text format
    Text Format = "text"
    // DefaultCommand is the default command for Runc
    DefaultCommand = "runc"
)

// DefaultCommand is the default command for Runc
var DefaultCommand = "runc"

// Runc is the client to the runc cli
type Runc struct {
    // Command overrides the name of the runc binary. If empty, DefaultCommand
    // is used.
    Command   string
    Root      string
    Debug     bool
    Log       string
    LogFormat Format
    // PdeathSignal sets a signal the child process will receive when the
    // parent dies.
    //
    // When Pdeathsig is set, command invocations will call runtime.LockOSThread
    // to prevent OS thread termination from spuriously triggering the
    // signal. See https://github.com/golang/go/issues/27505 and
    // https://github.com/golang/go/blob/126c22a09824a7b52c019ed9a1d198b4e7781676/src/syscall/exec_linux.go#L48-L51
    //
    // A program with GOMAXPROCS=1 might hang because of the use of
    // runtime.LockOSThread. Callers should ensure they retain at least one
    // unlocked thread.
    PdeathSignal syscall.Signal // using syscall.Signal to allow compilation on non-unix (unix.Syscall is an alias for syscall.Signal)
    Setpgid      bool

    // Criu sets the path to the criu binary used for checkpoint and restore.
    //
    // Deprecated: runc option --criu is now ignored (with a warning), and the
    // option will be removed entirely in a future release. Users who need a non-
    // standard criu binary should rely on the standard way of looking up binaries
    // in $PATH.
    Criu          string
    SystemdCgroup bool
    Rootless      *bool // nil stands for "auto"
    ExtraArgs     []string
}

// List returns all containers created inside the provided runc root directory
func (r *Runc) List(context context.Context) ([]*Container, error) {
    data, err := cmdOutput(r.command(context, "list", "--format=json"), false, nil)
    data, err := r.cmdOutput(r.command(context, "list", "--format=json"), false, nil)
    defer putBuf(data)
    if err != nil {
        return nil, err
@@ -70,7 +109,7 @@ func (r *Runc) List(context context.Context) ([]*Container, error) {

// State returns the state for the container provided by id
func (r *Runc) State(context context.Context, id string) (*Container, error) {
    data, err := cmdOutput(r.command(context, "state", id), true, nil)
    data, err := r.cmdOutput(r.command(context, "state", id), true, nil)
    defer putBuf(data)
    if err != nil {
        return nil, fmt.Errorf("%s: %s", err, data.String())
@@ -82,10 +121,12 @@ func (r *Runc) State(context context.Context, id string) (*Container, error) {
    return &c, nil
}

// ConsoleSocket handles the path of the socket for console access
type ConsoleSocket interface {
    Path() string
}

// CreateOpts holds all the options information for calling runc with supported options
type CreateOpts struct {
    IO
    // PidFile is a path to where a pid file should be created
@@ -96,6 +137,7 @@ type CreateOpts struct {
    NoNewKeyring bool
    ExtraFiles   []*os.File
    Started      chan<- int
    ExtraArgs    []string
}

func (o *CreateOpts) args() (out []string, err error) {
@@ -121,38 +163,50 @@ func (o *CreateOpts) args() (out []string, err error) {
    if o.ExtraFiles != nil {
        out = append(out, "--preserve-fds", strconv.Itoa(len(o.ExtraFiles)))
    }
    if len(o.ExtraArgs) > 0 {
        out = append(out, o.ExtraArgs...)
    }
    return out, nil
}

func (r *Runc) startCommand(cmd *exec.Cmd) (chan Exit, error) {
    if r.PdeathSignal != 0 {
        return Monitor.StartLocked(cmd)
    }
    return Monitor.Start(cmd)
}

// Create creates a new container and returns its pid if it was created successfully
func (r *Runc) Create(context context.Context, id, bundle string, opts *CreateOpts) error {
    args := []string{"create", "--bundle", bundle}
    if opts != nil {
        oargs, err := opts.args()
        if err != nil {
            return err
        }
        args = append(args, oargs...)
    if opts == nil {
        opts = &CreateOpts{}
    }

    oargs, err := opts.args()
    if err != nil {
        return err
    }
    args = append(args, oargs...)
    cmd := r.command(context, append(args, id)...)
    if opts != nil && opts.IO != nil {
    if opts.IO != nil {
        opts.Set(cmd)
    }
    cmd.ExtraFiles = opts.ExtraFiles

    if cmd.Stdout == nil && cmd.Stderr == nil {
        data, err := cmdOutput(cmd, true, nil)
        data, err := r.cmdOutput(cmd, true, nil)
        defer putBuf(data)
        if err != nil {
            return fmt.Errorf("%s: %s", err, data.String())
        }
        return nil
    }
    ec, err := Monitor.Start(cmd)
    ec, err := r.startCommand(cmd)
    if err != nil {
        return err
    }
    if opts != nil && opts.IO != nil {
    if opts.IO != nil {
        if c, ok := opts.IO.(StartCloser); ok {
            if err := c.CloseAfterStart(); err != nil {
                return err
@@ -171,12 +225,14 @@ func (r *Runc) Start(context context.Context, id string) error {
    return r.runOrError(r.command(context, "start", id))
}

// ExecOpts holds optional settings when starting an exec process with runc
type ExecOpts struct {
    IO
    PidFile       string
    ConsoleSocket ConsoleSocket
    Detach        bool
    Started       chan<- int
    ExtraArgs     []string
}

func (o *ExecOpts) args() (out []string, err error) {
@@ -193,16 +249,22 @@ func (o *ExecOpts) args() (out []string, err error) {
        }
        out = append(out, "--pid-file", abs)
    }
    if len(o.ExtraArgs) > 0 {
        out = append(out, o.ExtraArgs...)
    }
    return out, nil
}

// Exec executes an additional process inside the container based on a full
// OCI Process specification
func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts *ExecOpts) error {
    if opts == nil {
        opts = &ExecOpts{}
    }
    if opts.Started != nil {
        defer close(opts.Started)
    }
    f, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), "runc-process")
    f, err := os.CreateTemp(os.Getenv("XDG_RUNTIME_DIR"), "runc-process")
    if err != nil {
        return err
    }
@@ -213,33 +275,31 @@ func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts
        return err
    }
    args := []string{"exec", "--process", f.Name()}
    if opts != nil {
        oargs, err := opts.args()
        if err != nil {
            return err
        }
        args = append(args, oargs...)
    oargs, err := opts.args()
    if err != nil {
        return err
    }
    args = append(args, oargs...)
    cmd := r.command(context, append(args, id)...)
    if opts != nil && opts.IO != nil {
    if opts.IO != nil {
        opts.Set(cmd)
    }
    if cmd.Stdout == nil && cmd.Stderr == nil {
        data, err := cmdOutput(cmd, true, opts.Started)
        data, err := r.cmdOutput(cmd, true, opts.Started)
        defer putBuf(data)
        if err != nil {
            return fmt.Errorf("%w: %s", err, data.String())
        }
        return nil
    }
    ec, err := Monitor.Start(cmd)
    ec, err := r.startCommand(cmd)
    if err != nil {
        return err
    }
    if opts.Started != nil {
        opts.Started <- cmd.Process.Pid
    }
    if opts != nil && opts.IO != nil {
    if opts.IO != nil {
        if c, ok := opts.IO.(StartCloser); ok {
            if err := c.CloseAfterStart(); err != nil {
                return err
@@ -256,22 +316,24 @@ func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts
// Run runs the create, start, delete lifecycle of the container
// and returns its exit status after it has exited
func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) (int, error) {
    if opts == nil {
        opts = &CreateOpts{}
    }
    if opts.Started != nil {
        defer close(opts.Started)
    }
    args := []string{"run", "--bundle", bundle}
    if opts != nil {
        oargs, err := opts.args()
        if err != nil {
            return -1, err
        }
        args = append(args, oargs...)
    oargs, err := opts.args()
    if err != nil {
        return -1, err
    }
    args = append(args, oargs...)
    cmd := r.command(context, append(args, id)...)
    if opts != nil && opts.IO != nil {
    if opts.IO != nil {
        opts.Set(cmd)
    }
    ec, err := Monitor.Start(cmd)
    cmd.ExtraFiles = opts.ExtraFiles
    ec, err := r.startCommand(cmd)
    if err != nil {
        return -1, err
    }
@@ -285,14 +347,19 @@ func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts)
    return status, err
}

// DeleteOpts holds the deletion options for calling `runc delete`
type DeleteOpts struct {
    Force bool
    Force     bool
    ExtraArgs []string
}

func (o *DeleteOpts) args() (out []string) {
    if o.Force {
        out = append(out, "--force")
    }
    if len(o.ExtraArgs) > 0 {
        out = append(out, o.ExtraArgs...)
    }
    return out
}

@@ -307,13 +374,17 @@ func (r *Runc) Delete(context context.Context, id string, opts *DeleteOpts) erro

// KillOpts specifies options for killing a container and its processes
type KillOpts struct {
    All bool
    All       bool
    ExtraArgs []string
}

func (o *KillOpts) args() (out []string) {
    if o.All {
        out = append(out, "--all")
    }
    if len(o.ExtraArgs) > 0 {
        out = append(out, o.ExtraArgs...)
    }
    return out
}

@@ -335,7 +406,7 @@ func (r *Runc) Stats(context context.Context, id string) (*Stats, error) {
    if err != nil {
        return nil, err
    }
    ec, err := Monitor.Start(cmd)
    ec, err := r.startCommand(cmd)
    if err != nil {
        return nil, err
    }
@@ -357,7 +428,7 @@ func (r *Runc) Events(context context.Context, id string, interval time.Duration
    if err != nil {
        return nil, err
    }
    ec, err := Monitor.Start(cmd)
    ec, err := r.startCommand(cmd)
    if err != nil {
        rd.Close()
        return nil, err
@@ -401,7 +472,7 @@ func (r *Runc) Resume(context context.Context, id string) error {

// Ps lists all the processes inside the container returning their pids
func (r *Runc) Ps(context context.Context, id string) ([]int, error) {
    data, err := cmdOutput(r.command(context, "ps", "--format", "json", id), true, nil)
    data, err := r.cmdOutput(r.command(context, "ps", "--format", "json", id), true, nil)
    defer putBuf(data)
    if err != nil {
        return nil, fmt.Errorf("%s: %s", err, data.String())
@@ -415,7 +486,7 @@ func (r *Runc) Ps(context context.Context, id string) ([]int, error) {

// Top lists all the processes inside the container returning the full ps data
func (r *Runc) Top(context context.Context, id string, psOptions string) (*TopResults, error) {
    data, err := cmdOutput(r.command(context, "ps", "--format", "table", id, psOptions), true, nil)
    data, err := r.cmdOutput(r.command(context, "ps", "--format", "table", id, psOptions), true, nil)
    defer putBuf(data)
    if err != nil {
        return nil, fmt.Errorf("%s: %s", err, data.String())
@@ -428,6 +499,7 @@ func (r *Runc) Top(context context.Context, id string, psOptions string) (*TopRe
    return topResults, nil
}

// CheckpointOpts holds the options for performing a criu checkpoint using runc
type CheckpointOpts struct {
    // ImagePath is the path for saving the criu image file
    ImagePath string
@@ -454,13 +526,18 @@ type CheckpointOpts struct {
    LazyPages bool
    // StatusFile is the file criu writes \0 to once lazy-pages is ready
    StatusFile *os.File
    ExtraArgs  []string
}

// CgroupMode defines the cgroup mode used for checkpointing
type CgroupMode string

const (
    Soft CgroupMode = "soft"
    Full CgroupMode = "full"
    // Soft is the "soft" cgroup mode
    Soft CgroupMode = "soft"
    // Full is the "full" cgroup mode
    Full CgroupMode = "full"
    // Strict is the "strict" cgroup mode
    Strict CgroupMode = "strict"
)

@@ -498,9 +575,13 @@ func (o *CheckpointOpts) args() (out []string) {
    if o.LazyPages {
        out = append(out, "--lazy-pages")
    }
    if len(o.ExtraArgs) > 0 {
        out = append(out, o.ExtraArgs...)
    }
    return out
}

// CheckpointAction represents specific actions executed during checkpoint/restore
type CheckpointAction func([]string) []string

// LeaveRunning keeps the container running after the checkpoint has been completed
@@ -535,6 +616,7 @@ func (r *Runc) Checkpoint(context context.Context, id string, opts *CheckpointOp
    return r.runOrError(cmd)
}

// RestoreOpts holds the options for performing a criu restore using runc
type RestoreOpts struct {
    CheckpointOpts
    IO
@@ -544,6 +626,7 @@ type RestoreOpts struct {
    NoSubreaper   bool
    NoPivot       bool
    ConsoleSocket ConsoleSocket
    ExtraArgs     []string
}

func (o *RestoreOpts) args() ([]string, error) {
@@ -567,6 +650,9 @@ func (o *RestoreOpts) args() ([]string, error) {
    if o.NoSubreaper {
        out = append(out, "-no-subreaper")
    }
    if len(o.ExtraArgs) > 0 {
        out = append(out, o.ExtraArgs...)
    }
    return out, nil
}

@@ -585,7 +671,7 @@ func (r *Runc) Restore(context context.Context, id, bundle string, opts *Restore
    if opts != nil && opts.IO != nil {
        opts.Set(cmd)
    }
    ec, err := Monitor.Start(cmd)
    ec, err := r.startCommand(cmd)
    if err != nil {
        return -1, err
    }
@@ -611,14 +697,16 @@ func (r *Runc) Update(context context.Context, id string, resources *specs.Linux
    if err := json.NewEncoder(buf).Encode(resources); err != nil {
        return err
    }
    args := []string{"update", "--resources", "-", id}
    args := []string{"update", "--resources=-", id}
    cmd := r.command(context, args...)
    cmd.Stdin = buf
    return r.runOrError(cmd)
}

// ErrParseRuncVersion is used when the runc version can't be parsed
var ErrParseRuncVersion = errors.New("unable to parse runc version")

// Version represents the runc version information
type Version struct {
    Runc   string
    Commit string
@@ -627,7 +715,7 @@ type Version struct {

// Version returns the runc and runtime-spec versions
func (r *Runc) Version(context context.Context) (Version, error) {
    data, err := cmdOutput(r.command(context, "--version"), false, nil)
    data, err := r.cmdOutput(r.command(context, "--version"), false, nil)
    defer putBuf(data)
    if err != nil {
        return Version{}, err
@@ -657,6 +745,26 @@ func parseVersion(data []byte) (Version, error) {
    return v, nil
}

// Features shows the features implemented by the runtime.
//
// Availability:
//
// - runc: supported since runc v1.1.0
// - crun: https://github.com/containers/crun/issues/1177
// - youki: https://github.com/containers/youki/issues/815
func (r *Runc) Features(context context.Context) (*features.Features, error) {
    data, err := r.cmdOutput(r.command(context, "features"), false, nil)
    defer putBuf(data)
    if err != nil {
        return nil, err
    }
    var feat features.Features
    if err := json.Unmarshal(data.Bytes(), &feat); err != nil {
        return nil, err
    }
    return &feat, nil
}

func (r *Runc) args() (out []string) {
    if r.Root != "" {
        out = append(out, "--root", r.Root)
@@ -670,9 +778,6 @@ func (r *Runc) args() (out []string) {
    if r.LogFormat != none {
        out = append(out, "--log-format", string(r.LogFormat))
    }
    if r.Criu != "" {
        out = append(out, "--criu", r.Criu)
    }
    if r.SystemdCgroup {
        out = append(out, "--systemd-cgroup")
    }
@@ -680,6 +785,9 @@ func (r *Runc) args() (out []string) {
        // nil stands for "auto" (differs from explicit "false")
        out = append(out, "--rootless="+strconv.FormatBool(*r.Rootless))
    }
    if len(r.ExtraArgs) > 0 {
        out = append(out, r.ExtraArgs...)
    }
    return out
}

@@ -689,7 +797,7 @@ func (r *Runc) args() (out []string) {
// <stderr>
func (r *Runc) runOrError(cmd *exec.Cmd) error {
    if cmd.Stdout != nil || cmd.Stderr != nil {
        ec, err := Monitor.Start(cmd)
        ec, err := r.startCommand(cmd)
        if err != nil {
            return err
        }
@@ -699,7 +807,7 @@ func (r *Runc) runOrError(cmd *exec.Cmd) error {
        }
        return err
    }
    data, err := cmdOutput(cmd, true, nil)
    data, err := r.cmdOutput(cmd, true, nil)
    defer putBuf(data)
    if err != nil {
        return fmt.Errorf("%s: %s", err, data.String())
@@ -709,14 +817,14 @@ func (r *Runc) runOrError(cmd *exec.Cmd) error {

// callers of cmdOutput are expected to call putBuf on the returned Buffer
// to ensure it is released back to the shared pool after use.
func cmdOutput(cmd *exec.Cmd, combined bool, started chan<- int) (*bytes.Buffer, error) {
func (r *Runc) cmdOutput(cmd *exec.Cmd, combined bool, started chan<- int) (*bytes.Buffer, error) {
    b := getBuf()

    cmd.Stdout = b
    if combined {
        cmd.Stderr = b
    }
    ec, err := Monitor.Start(cmd)
    ec, err := r.startCommand(cmd)
    if err != nil {
        return nil, err
    }
@@ -732,6 +840,7 @@ func cmdOutput(cmd *exec.Cmd, combined bool, started chan<- int) (*bytes.Buffer,
    return b, err
}

// ExitError holds the status return code when a process exits with an error code
type ExitError struct {
    Status int
}
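
Pulling the runc.go changes together: the option structs grow an ExtraArgs field, cmdOutput becomes a method so every invocation can route through startCommand (and therefore StartLocked when PdeathSignal is set), the Runc struct is unified across platforms, and a Features call is added (supported by runc v1.1.0 and later). A rough usage sketch of the updated client; the root path and extra flag are purely illustrative:

```go
package main

import (
	"context"
	"fmt"
	"log"

	runc "github.com/containerd/go-runc"
)

func main() {
	r := &runc.Runc{
		Command:   runc.DefaultCommand, // now a var, so it can be overridden globally
		Root:      "/run/runc-demo",    // illustrative root directory
		LogFormat: runc.JSON,
		ExtraArgs: []string{"--debug"}, // appended to every runc invocation
	}

	ctx := context.Background()

	v, err := r.Version(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("runc:", v.Runc, "commit:", v.Commit)

	// Features requires runc >= 1.1.0; older runtimes return an error here.
	if feat, err := r.Features(ctx); err == nil {
		fmt.Println("hooks supported by the runtime:", feat.Hooks)
	}
}
```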

38 vendor/github.com/containerd/go-runc/runc_unix.go generated vendored
@@ -1,38 +0,0 @@
//+build !windows

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package runc

import (
    "golang.org/x/sys/unix"
)

// Runc is the client to the runc cli
type Runc struct {
    //If command is empty, DefaultCommand is used
    Command       string
    Root          string
    Debug         bool
    Log           string
    LogFormat     Format
    PdeathSignal  unix.Signal
    Setpgid       bool
    Criu          string
    SystemdCgroup bool
    Rootless      *bool // nil stands for "auto"
}

31 vendor/github.com/containerd/go-runc/runc_windows.go generated vendored
@@ -1,31 +0,0 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package runc

// Runc is the client to the runc cli
type Runc struct {
    //If command is empty, DefaultCommand is used
    Command       string
    Root          string
    Debug         bool
    Log           string
    LogFormat     Format
    Setpgid       bool
    Criu          string
    SystemdCgroup bool
    Rootless      *bool // nil stands for "auto"
}

16 vendor/github.com/containerd/go-runc/utils.go generated vendored
@@ -18,34 +18,22 @@ package runc

import (
    "bytes"
    "io/ioutil"
    "os"
    "strconv"
    "strings"
    "sync"
    "syscall"
)

// ReadPidFile reads the pid file at the provided path and returns
// the pid or an error if the read and conversion is unsuccessful
func ReadPidFile(path string) (int, error) {
    data, err := ioutil.ReadFile(path)
    data, err := os.ReadFile(path)
    if err != nil {
        return -1, err
    }
    return strconv.Atoi(string(data))
}

const exitSignalOffset = 128

// exitStatus returns the correct exit status for a process based on if it
// was signaled or exited cleanly
func exitStatus(status syscall.WaitStatus) int {
    if status.Signaled() {
        return exitSignalOffset + int(status.Signal())
    }
    return status.ExitStatus()
}

var bytesBufferPool = sync.Pool{
    New: func() interface{} {
        return bytes.NewBuffer(nil)

1 vendor/github.com/docker/distribution/.dockerignore generated vendored Normal file
@@ -0,0 +1 @@
bin/

7 vendor/github.com/docker/distribution/.golangci.yml generated vendored
@@ -18,3 +18,10 @@ run:
  deadline: 2m
  skip-dirs:
    - vendor

issues:
  exclude-rules:
    # io/ioutil is deprecated, but won't be removed until Go v2. It's safe to ignore for the release/2.8 branch.
    - text: "SA1019: \"io/ioutil\" has been deprecated since Go 1.16"
      linters:
        - staticcheck

6 vendor/github.com/docker/distribution/.mailmap generated vendored
@@ -44,6 +44,8 @@ Thomas Berger <loki@lokis-chaos.de> Thomas Berger <tbe@users.noreply.github.com>
Samuel Karp <skarp@amazon.com> Samuel Karp <samuelkarp@users.noreply.github.com>
Justin Cormack <justin.cormack@docker.com>
sayboras <sayboras@yahoo.com>
CrazyMax <github@crazymax.dev>
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
Hayley Swimelar <hswimelar@gmail.com>
Jose D. Gomez R <jose.gomez@suse.com>
Shengjing Zhu <zhsj@debian.org>
Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com>

82 vendor/github.com/docker/distribution/Dockerfile generated vendored
@@ -1,49 +1,59 @@
# syntax=docker/dockerfile:1.3
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.16.15
ARG GORELEASER_XX_VERSION=1.2.5
ARG GO_VERSION=1.19.9
ARG ALPINE_VERSION=3.16
ARG XX_VERSION=1.2.1

FROM --platform=$BUILDPLATFORM crazymax/goreleaser-xx:${GORELEASER_XX_VERSION} AS goreleaser-xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS base
COPY --from=goreleaser-xx / /
RUN apk add --no-cache file git
WORKDIR /go/src/github.com/docker/distribution

FROM base AS build
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
COPY --from=xx / /
RUN apk add --no-cache bash coreutils file git
ENV GO111MODULE=auto
ENV CGO_ENABLED=0
# GIT_REF is used by goreleaser-xx to handle the proper git ref when available.
# It will fallback to the working tree info if empty and use "git tag --points-at"
# or "git describe" to define the version info.
ARG GIT_REF
ARG TARGETPLATFORM
ARG PKG="github.com/distribution/distribution"
ARG BUILDTAGS="include_oss include_gcs"
RUN --mount=type=bind,rw \
    --mount=type=cache,target=/root/.cache/go-build \
    --mount=target=/go/pkg/mod,type=cache \
    goreleaser-xx --debug \
      --name="registry" \
      --dist="/out" \
      --main="./cmd/registry" \
      --flags="-v" \
      --ldflags="-s -w -X '$PKG/version.Version={{.Version}}' -X '$PKG/version.Revision={{.Commit}}' -X '$PKG/version.Package=$PKG'" \
      --tags="$BUILDTAGS" \
      --files="LICENSE" \
      --files="README.md"
WORKDIR /go/src/github.com/docker/distribution

FROM scratch AS artifact
COPY --from=build /out/*.tar.gz /
COPY --from=build /out/*.zip /
COPY --from=build /out/*.sha256 /
FROM base AS version
ARG PKG="github.com/docker/distribution"
RUN --mount=target=. \
    VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \
    echo "-X ${PKG}/version.Version=${VERSION#v} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \
    echo -n "${VERSION}" | tee /tmp/.version;

FROM base AS build
ARG TARGETPLATFORM
ARG LDFLAGS="-s -w"
ARG BUILDTAGS="include_oss include_gcs"
RUN --mount=type=bind,target=/go/src/github.com/docker/distribution,rw \
    --mount=type=cache,target=/root/.cache/go-build \
    --mount=target=/go/pkg/mod,type=cache \
    --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \
    set -x ; xx-go build -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \
    && xx-verify --static /usr/bin/registry

FROM scratch AS binary
COPY --from=build /usr/local/bin/registry* /
COPY --from=build /usr/bin/registry /

FROM alpine:3.14
FROM base AS releaser
ARG TARGETOS
ARG TARGETARCH
ARG TARGETVARIANT
WORKDIR /work
RUN --mount=from=binary,target=/build \
    --mount=type=bind,target=/src \
    --mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=version \
    VERSION=$(cat /tmp/.version) \
    && mkdir -p /out \
    && cp /build/registry /src/README.md /src/LICENSE . \
    && tar -czvf "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" * \
    && sha256sum -z "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" | awk '{ print $1 }' > "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz.sha256"

FROM scratch AS artifact
COPY --from=releaser /out /

FROM alpine:${ALPINE_VERSION}
RUN apk add --no-cache ca-certificates
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
COPY --from=build /usr/local/bin/registry /bin/registry
COPY --from=binary /registry /bin/registry
VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]

2 vendor/github.com/docker/distribution/Makefile generated vendored
@@ -50,7 +50,7 @@ version/version.go:

check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck")
	@echo "$(WHALE) $@"
	golangci-lint run
	@GO111MODULE=off golangci-lint run

test: ## run tests, except integration test with test.short
	@echo "$(WHALE) $@"

21 vendor/github.com/docker/distribution/docker-bake.hcl generated vendored
@@ -1,15 +1,3 @@
// GITHUB_REF is the actual ref that triggers the workflow
// https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables
variable "GITHUB_REF" {
  default = ""
}

target "_common" {
  args = {
    GIT_REF = GITHUB_REF
  }
}

group "default" {
  targets = ["image-local"]
}
@@ -20,13 +8,11 @@ target "docker-metadata-action" {
}

target "binary" {
  inherits = ["_common"]
  target = "binary"
  output = ["./bin"]
}

target "artifact" {
  inherits = ["_common"]
  target = "artifact"
  output = ["./bin"]
}
@@ -43,8 +29,13 @@ target "artifact-all" {
  ]
}

// Special target: https://github.com/docker/metadata-action#bake-definition
target "docker-metadata-action" {
  tags = ["registry:local"]
}

target "image" {
  inherits = ["_common", "docker-metadata-action"]
  inherits = ["docker-metadata-action"]
}

target "image-local" {

4 vendor/github.com/docker/distribution/reference/reference.go generated vendored
@@ -3,13 +3,13 @@
//
// Grammar
//
// reference                       := name [ ":" tag ] [ "@" digest ]
// reference                       := name [ ":" tag ] [ "@" digest ]
// name                            := [domain '/'] path-component ['/' path-component]*
// domain                          := domain-component ['.' domain-component]* [':' port-number]
// domain-component                := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number                     := /[0-9]+/
// path-component                  := alpha-numeric [separator alpha-numeric]*
// alpha-numeric                   := /[a-z0-9]+/
// alpha-numeric                   := /[a-z0-9]+/
// separator                       := /[_.]|__|[-]*/
//
// tag                             := /[\w][\w.-]{0,127}/

17 vendor/github.com/docker/distribution/registry/api/v2/descriptors.go generated vendored
@@ -134,6 +134,19 @@ var (
        },
    }

    invalidPaginationResponseDescriptor = ResponseDescriptor{
        Name:        "Invalid pagination number",
        Description: "The received parameter n was invalid in some way, as described by the error code. The client should resolve the issue and retry the request.",
        StatusCode:  http.StatusBadRequest,
        Body: BodyDescriptor{
            ContentType: "application/json",
            Format:      errorsBody,
        },
        ErrorCodes: []errcode.ErrorCode{
            ErrorCodePaginationNumberInvalid,
        },
    }

    repositoryNotFoundResponseDescriptor = ResponseDescriptor{
        Name:       "No Such Repository Error",
        StatusCode: http.StatusNotFound,
@@ -490,6 +503,7 @@ var routeDescriptors = []RouteDescriptor{
                },
            },
            Failures: []ResponseDescriptor{
                invalidPaginationResponseDescriptor,
                unauthorizedResponseDescriptor,
                repositoryNotFoundResponseDescriptor,
                deniedResponseDescriptor,
@@ -1578,6 +1592,9 @@ var routeDescriptors = []RouteDescriptor{
                    },
                },
            },
            Failures: []ResponseDescriptor{
                invalidPaginationResponseDescriptor,
            },
        },
    },
},

9 vendor/github.com/docker/distribution/registry/api/v2/errors.go generated vendored
@@ -133,4 +133,13 @@ var (
        longer proceed.`,
        HTTPStatusCode: http.StatusNotFound,
    })

    ErrorCodePaginationNumberInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
        Value:   "PAGINATION_NUMBER_INVALID",
        Message: "invalid number of results requested",
        Description: `Returned when the "n" parameter (number of results
        to return) is not an integer, "n" is negative or "n" is bigger than
        the maximum allowed.`,
        HTTPStatusCode: http.StatusBadRequest,
    })
)
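
On the client side, failures like the new PAGINATION_NUMBER_INVALID surface as errcode values rather than bare HTTP statuses, so callers can match on the registered code. A hedged sketch of that pattern; the helper name and the way the error is constructed here are illustrative, not part of this diff:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
	v2 "github.com/docker/distribution/registry/api/v2"
)

// hasErrorCode reports whether err (possibly an errcode.Errors slice, as the
// registry client returns) contains the given registered error code.
func hasErrorCode(err error, code errcode.ErrorCode) bool {
	var errs errcode.Errors
	if errors.As(err, &errs) {
		for _, e := range errs {
			var ec errcode.Error
			if errors.As(e, &ec) && ec.Code == code {
				return true
			}
		}
		return false
	}
	var ec errcode.Error
	return errors.As(err, &ec) && ec.Code == code
}

func main() {
	// In practice err would come from a catalog/tags listing call.
	var err error = errcode.Errors{v2.ErrorCodePaginationNumberInvalid.WithMessage("n must be >= 0")}
	fmt.Println(hasErrorCode(err, v2.ErrorCodePaginationNumberInvalid))
}
```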

2 vendor/github.com/docker/distribution/registry/client/errors.go generated vendored
@@ -55,6 +55,8 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
    switch statusCode {
    case http.StatusUnauthorized:
        return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
    case http.StatusForbidden:
        return errcode.ErrorCodeDenied.WithMessage(detailsErr.Details)
    case http.StatusTooManyRequests:
        return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
    default:

4 vendor/github.com/docker/distribution/registry/client/repository.go generated vendored
@@ -114,9 +114,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri
        return 0, err
    }

    for cnt := range ctlg.Repositories {
        entries[cnt] = ctlg.Repositories[cnt]
    }
    copy(entries, ctlg.Repositories)
    numFilled = len(ctlg.Repositories)

    link := resp.Header.Get("Link")

1 vendor/github.com/docker/distribution/registry/client/transport/http_reader.go generated vendored
@@ -180,7 +180,6 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
        // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
    }

    req.Header.Add("Accept-Encoding", "identity")
    resp, err := hrs.client.Do(req)
    if err != nil {
        return nil, err

22 vendor/github.com/moby/buildkit/exporter/containerimage/writer.go generated vendored
@@ -574,11 +574,10 @@ func (ic *ImageWriter) Applier() diff.Applier {
func defaultImageConfig() ([]byte, error) {
    pl := platforms.Normalize(platforms.DefaultSpec())

    img := ocispecs.Image{
        Architecture: pl.Architecture,
        OS:           pl.OS,
        Variant:      pl.Variant,
    }
    img := ocispecs.Image{}
    img.Architecture = pl.Architecture
    img.OS = pl.OS
    img.Variant = pl.Variant
    img.RootFS.Type = "layers"
    img.Config.WorkingDir = "/"
    img.Config.Env = []string{"PATH=" + system.DefaultPathEnv(pl.OS)}
@@ -587,13 +586,12 @@ func defaultImageConfig() ([]byte, error) {
}

func attestationsConfig(layers []ocispecs.Descriptor) ([]byte, error) {
    img := ocispecs.Image{
        Architecture: intotoPlatform.Architecture,
        OS:           intotoPlatform.OS,
        OSVersion:    intotoPlatform.OSVersion,
        OSFeatures:   intotoPlatform.OSFeatures,
        Variant:      intotoPlatform.Variant,
    }
    img := ocispecs.Image{}
    img.Architecture = intotoPlatform.Architecture
    img.OS = intotoPlatform.OS
    img.OSVersion = intotoPlatform.OSVersion
    img.OSFeatures = intotoPlatform.OSFeatures
    img.Variant = intotoPlatform.Variant
    img.RootFS.Type = "layers"
    for _, layer := range layers {
        img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, digest.Digest(layer.Annotations["containerd.io/uncompressed"]))

11 vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go generated vendored
@@ -20,13 +20,10 @@ func clone(src Image) Image {
}

func emptyImage(platform ocispecs.Platform) Image {
    img := Image{
        Image: ocispecs.Image{
            Architecture: platform.Architecture,
            OS:           platform.OS,
            Variant:      platform.Variant,
        },
    }
    img := Image{}
    img.Architecture = platform.Architecture
    img.OS = platform.OS
    img.Variant = platform.Variant
    img.RootFS.Type = "layers"
    img.Config.WorkingDir = "/"
    img.Config.Env = []string{"PATH=" + system.DefaultPathEnv(platform.OS)}
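
The two buildkit hunks above are adapting to newer OCI image-spec Go types, where Image embeds a Platform struct: the platform fields are promoted, so they can no longer appear directly in the Image composite literal and are set by assignment instead. A minimal sketch of the same pattern, assuming image-spec v1.1-style types with the embedded Platform:

```go
package main

import (
	"fmt"

	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// With Platform embedded in Image, Architecture/OS/Variant are promoted
	// fields and are assigned after construction rather than listed in the
	// Image literal.
	img := ocispecs.Image{}
	img.Architecture = "amd64"
	img.OS = "linux"
	img.RootFS.Type = "layers"

	fmt.Println(img.OS + "/" + img.Architecture)
}
```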

5 vendor/github.com/moby/buildkit/session/grpc.go generated vendored
@@ -112,6 +112,11 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func())
        }

        if err != nil {
            select {
            case <-ctx.Done():
                return
            default:
            }
            if failedBefore {
                bklog.G(ctx).Error("healthcheck failed fatally")
                return

Some files were not shown because too many files have changed in this diff.