Compare commits

31 commits:

1331b8c39a, 907f037141, a5b597ea51, 8bbfa32741, 807e415260, 8587a1c617,
9717369913, ed0c147c8f, 90be9ab802, d73f7031e0, ea7f7f168e, 233c49438b,
2b7424512a, f77a3274b4, c76bb6a3a3, 71846e82c1, ecbc27aa22, c01f02cfcb,
ce79cd19f6, 1235338836, 763d2b7996, e9eff01dca, 69ef9a7f90, 86770904be,
31b98f9502, bfffb0974e, e28bc0d271, d169a57306, 63640838ba, 269e55a915,
012dd239ce
54 changed files with 1214 additions and 217 deletions
.github/workflows/bin-image.yml (vendored, new file, 72 lines)

@@ -0,0 +1,72 @@
name: bin-image

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

on:
  workflow_dispatch:
  push:
    branches:
      - 'master'
      - '[0-9]+.[0-9]+'
    tags:
      - 'v*'
  pull_request:

env:
  PLATFORM: Moby Engine
  PRODUCT: Moby
  DEFAULT_PRODUCT_LICENSE: Moby
  PACKAGER_NAME: Moby

jobs:
  validate-dco:
    uses: ./.github/workflows/.dco.yml

  build:
    runs-on: ubuntu-20.04
    needs:
      - validate-dco
    steps:
      -
        name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      -
        name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: moby-bin
          ### versioning strategy
          ## push semver tag v23.0.0
          # moby/moby-bin:23.0.0
          # moby/moby-bin:latest
          ## push semver prelease tag v23.0.0-beta.1
          # moby/moby-bin:23.0.0-beta.1
          ## push on master
          # moby/moby-bin:master
          ## push on 23.0 branch
          # moby/moby-bin:23.0
          tags: |
            type=semver,pattern={{version}}
            type=ref,event=branch
            type=ref,event=pr
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      -
        name: Build
        uses: docker/bake-action@v2
        with:
          files: |
            ./docker-bake.hcl
            ${{ steps.meta.outputs.bake-file }}
          targets: bin-image-cross
          set: |
            *.output=type=cacheonly
@@ -192,7 +192,7 @@ RUN git init . && git remote add origin "https://github.com/containerd/container
# When updating the binary version you may also need to update the vendor
# version to pick up bug fixes or new APIs, however, usually the Go packages
# are built from a commit from the master branch.
ARG CONTAINERD_VERSION=v1.7.0
ARG CONTAINERD_VERSION=v1.7.1
RUN git fetch -q --depth 1 origin "${CONTAINERD_VERSION}" +refs/tags/*:refs/tags/* && git checkout -q FETCH_HEAD

FROM base AS containerd-build
@@ -168,7 +168,7 @@ SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPref
ARG GO_VERSION=1.20.4
ARG GOTESTSUM_VERSION=v1.8.2
ARG GOWINRES_VERSION=v0.3.0
ARG CONTAINERD_VERSION=v1.7.0
ARG CONTAINERD_VERSION=v1.7.1

# Environment variable notes:
# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
@@ -14,6 +14,7 @@ import (
    containerpkg "github.com/docker/docker/container"
    "github.com/docker/docker/image"
    "github.com/docker/docker/layer"
    "github.com/opencontainers/go-digest"
)

const (

@@ -45,7 +46,7 @@ type Backend interface {
    // ContainerCreateWorkdir creates the workdir
    ContainerCreateWorkdir(containerID string) error

    CreateImage(config []byte, parent string) (Image, error)
    CreateImage(ctx context.Context, config []byte, parent string, contentStoreDigest digest.Digest) (Image, error)

    ImageCacheBuilder
}

@@ -104,6 +105,7 @@ type ROLayer interface {
    Release() error
    NewRWLayer() (RWLayer, error)
    DiffID() layer.DiffID
    ContentStoreDigest() digest.Digest
}

// RWLayer is active layer that can be read/modified
@@ -21,8 +21,11 @@ type dispatchTestCase struct {
    files map[string]string
}

func init() {
    reexec.Init()
func TestMain(m *testing.M) {
    if reexec.Init() {
        return
    }
    os.Exit(m.Run())
}

func TestDispatch(t *testing.T) {
@@ -63,7 +63,7 @@ func (b *Builder) commitContainer(ctx context.Context, dispatchState *dispatchSt
        return err
    }

func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error {
func (b *Builder) exportImage(ctx context.Context, state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error {
    newLayer, err := layer.Commit()
    if err != nil {
        return err

@@ -98,7 +98,15 @@ func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, paren
        return errors.Wrap(err, "failed to encode image config")
    }

    exportedImage, err := b.docker.CreateImage(config, state.imageID)
    // when writing the new image's manifest, we now need to pass in the new layer's digest.
    // before the containerd store work this was unnecessary since we get the layer id
    // from the image's RootFS ChainID -- see:
    // https://github.com/moby/moby/blob/8cf66ed7322fa885ef99c4c044fa23e1727301dc/image/store.go#L162
    // however, with the containerd store we can't do this. An alternative implementation here
    // without changing the signature would be to get the layer digest by walking the content store
    // and filtering the objects to find the layer with the DiffID we want, but that has performance
    // implications that should be called out/investigated
    exportedImage, err := b.docker.CreateImage(ctx, config, state.imageID, newLayer.ContentStoreDigest())
    if err != nil {
        return errors.Wrapf(err, "failed to export image")
    }

@@ -170,7 +178,7 @@ func (b *Builder) performCopy(ctx context.Context, req dispatchRequest, inst cop
            return errors.Wrapf(err, "failed to copy files")
        }
    }
    return b.exportImage(state, rwLayer, imageMount.Image(), runConfigWithCommentCmd)
    return b.exportImage(ctx, state, rwLayer, imageMount.Image(), runConfigWithCommentCmd)
}

func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer, platform string) (copyInfo, error) {
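The comment block above is the crux of the change: with a containerd-backed image store the daemon can no longer recover the layer blob from the image's RootFS ChainID, so the builder now hands the committed layer's content-store digest to the backend together with the config. A minimal sketch of the resulting contract, as a hypothetical helper (not part of this diff), is:

```go
package example

import (
    "context"

    "github.com/docker/docker/builder"
)

// exportStep commits the step's read-write layer and passes both the image
// config and the committed layer's content-store digest to the backend.
// Backends without a content store (the graphdriver path) just receive "".
func exportStep(ctx context.Context, b builder.Backend, rw builder.RWLayer, config []byte, parentID string) (builder.Image, error) {
    committed, err := rw.Commit()
    if err != nil {
        return nil, err
    }
    return b.CreateImage(ctx, config, parentID, committed.ContentStoreDigest())
}
```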
@@ -1,6 +1,7 @@
package dockerfile // import "github.com/docker/docker/builder/dockerfile"

import (
    "context"
    "fmt"
    "os"
    "runtime"

@@ -193,6 +194,7 @@ type MockROLayer struct {
    diffID layer.DiffID
}

func (l *MockROLayer) ContentStoreDigest() digest.Digest { return "" }
func (l *MockROLayer) Release() error { return nil }
func (l *MockROLayer) NewRWLayer() (builder.RWLayer, error) { return nil, nil }
func (l *MockROLayer) DiffID() layer.DiffID { return l.diffID }

@@ -217,6 +219,6 @@ func TestExportImage(t *testing.T) {
        imageSources: getMockImageSource(nil, nil, nil),
        docker:       getMockBuildBackend(),
    }
    err := b.exportImage(ds, layer, parentImage, runConfig)
    err := b.exportImage(context.TODO(), ds, layer, parentImage, runConfig)
    assert.NilError(t, err)
}
@@ -13,6 +13,7 @@ import (
    containerpkg "github.com/docker/docker/container"
    "github.com/docker/docker/image"
    "github.com/docker/docker/layer"
    "github.com/opencontainers/go-digest"
)

// MockBackend implements the builder.Backend interface for unit testing

@@ -80,7 +81,7 @@ func (m *MockBackend) MakeImageCache(ctx context.Context, cacheFrom []string) (b
    return nil, nil
}

func (m *MockBackend) CreateImage(config []byte, parent string) (builder.Image, error) {
func (m *MockBackend) CreateImage(ctx context.Context, config []byte, parent string, layerDigest digest.Digest) (builder.Image, error) {
    return &mockImage{id: "test"}, nil
}

@@ -119,6 +120,10 @@ func (mic *mockImageCache) GetCache(parentID string, cfg *container.Config) (str

type mockLayer struct{}

func (l *mockLayer) ContentStoreDigest() digest.Digest {
    return ""
}

func (l *mockLayer) Release() error {
    return nil
}
@@ -17,8 +17,11 @@ const (
    contents = "contents test"
)

func init() {
    reexec.Init()
func TestMain(m *testing.M) {
    if reexec.Init() {
        return
    }
    os.Exit(m.Run())
}

func TestCloseRootDirectory(t *testing.T) {
@@ -6,13 +6,9 @@ import (
    "os"
    "path"
    "text/template"

    "github.com/docker/docker/pkg/aaparser"
)

type profileData struct {
    Version int
}
type profileData struct{}

func main() {
    if len(os.Args) < 2 {

@@ -22,15 +18,6 @@ func main() {
    // parse the arg
    apparmorProfilePath := os.Args[1]

    version, err := aaparser.GetVersion()
    if err != nil {
        log.Fatal(err)
    }
    data := profileData{
        Version: version,
    }
    fmt.Printf("apparmor_parser is of version %+v\n", data)

    // parse the template
    compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate)
    if err != nil {

@@ -48,6 +35,7 @@ func main() {
    }
    defer f.Close()

    data := profileData{}
    if err := compiled.Execute(f, data); err != nil {
        log.Fatalf("executing template failed: %v", err)
    }
@@ -149,9 +149,7 @@ profile /usr/bin/docker (attach_disconnected, complain) {
  }
  # xz works via pipes, so we do not need access to the filesystem.
  profile /usr/bin/xz (complain) {
    {{if ge .Version 209000}}
    signal (receive) peer=/usr/bin/docker,
    {{end}}
    /etc/ld.so.cache r,
    /lib/** rm,
    /usr/bin/xz rm,
@@ -2,11 +2,86 @@ package containerd

import (
    "context"
    "reflect"
    "strings"

    "github.com/docker/docker/api/types/container"
    imagetype "github.com/docker/docker/api/types/image"
    "github.com/docker/docker/builder"
    "github.com/docker/docker/image"
)

// MakeImageCache creates a stateful image cache.
func (i *ImageService) MakeImageCache(ctx context.Context, cacheFrom []string) (builder.ImageCache, error) {
    panic("not implemented")
    images := []*image.Image{}
    for _, c := range cacheFrom {
        im, err := i.GetImage(ctx, c, imagetype.GetImageOpts{})
        if err != nil {
            return nil, err
        }
        images = append(images, im)
    }
    return &imageCache{images: images, c: i}, nil
}

type imageCache struct {
    images []*image.Image
    c      *ImageService
}

func (ic *imageCache) GetCache(parentID string, cfg *container.Config) (imageID string, err error) {
    ctx := context.TODO()
    parent, err := ic.c.GetImage(ctx, parentID, imagetype.GetImageOpts{})
    if err != nil {
        return "", err
    }

    for _, localCachedImage := range ic.images {
        if isMatch(localCachedImage, parent, cfg) {
            return localCachedImage.ID().String(), nil
        }
    }

    children, err := ic.c.Children(ctx, parent.ID())
    if err != nil {
        return "", err
    }

    for _, children := range children {
        childImage, err := ic.c.GetImage(ctx, children.String(), imagetype.GetImageOpts{})
        if err != nil {
            return "", err
        }

        if isMatch(childImage, parent, cfg) {
            return children.String(), nil
        }
    }

    return "", nil
}

// isMatch checks whether a given target can be used as cache for the given
// parent image/config combination.
// A target can only be an immediate child of the given parent image. For
// a parent image with `n` history entries, a valid target must have `n+1`
// entries and the extra entry must match the provided config
func isMatch(target, parent *image.Image, cfg *container.Config) bool {
    if target == nil || parent == nil || cfg == nil {
        return false
    }

    if len(target.History) != len(parent.History)+1 ||
        len(target.RootFS.DiffIDs) != len(parent.RootFS.DiffIDs)+1 {
        return false
    }

    for i := range parent.History {
        if !reflect.DeepEqual(parent.History[i], target.History[i]) {
            return false
        }
    }

    childCreatedBy := target.History[len(target.History)-1].CreatedBy
    return childCreatedBy == strings.Join(cfg.Cmd, " ")
}
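The `n`/`n+1` rule in the isMatch comment is easiest to see with a concrete pair of images. A test-style sketch (hypothetical; it assumes same-package access to the unexported isMatch and uses only the image fields referenced above):

```go
package containerd

import (
    "testing"

    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/image"
    "github.com/docker/docker/layer"
)

func TestIsMatchSketch(t *testing.T) {
    parent := &image.Image{
        History: []image.History{{CreatedBy: "/bin/sh -c #(nop) ADD rootfs"}},
        RootFS:  &image.RootFS{DiffIDs: []layer.DiffID{"sha256:aaa"}},
    }
    // One extra history entry and one extra diff ID, and the extra entry's
    // CreatedBy equals strings.Join(cfg.Cmd, " ") -- so this is a cache hit.
    target := &image.Image{
        History: []image.History{
            parent.History[0],
            {CreatedBy: "/bin/sh -c apt-get update"},
        },
        RootFS: &image.RootFS{DiffIDs: []layer.DiffID{"sha256:aaa", "sha256:bbb"}},
    }
    cfg := &container.Config{Cmd: []string{"/bin/sh", "-c", "apt-get update"}}

    if !isMatch(target, parent, cfg) {
        t.Fatal("expected target to be a valid cache candidate for parent")
    }
}
```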
@@ -68,6 +68,21 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
        exposedPorts[nat.Port(k)] = v
    }

    var imgHistory []image.History
    for _, h := range ociimage.History {
        var created time.Time
        if h.Created != nil {
            created = *h.Created
        }
        imgHistory = append(imgHistory, image.History{
            Created:    created,
            Author:     h.Author,
            CreatedBy:  h.CreatedBy,
            Comment:    h.Comment,
            EmptyLayer: h.EmptyLayer,
        })
    }

    img := image.NewImage(image.ID(desc.Digest))
    img.V1Image = image.V1Image{
        ID: string(desc.Digest),

@@ -87,6 +102,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima
    }

    img.RootFS = rootfs
    img.History = imgHistory

    if options.Details {
        lastUpdated := time.Unix(0, 0)
@@ -2,23 +2,510 @@ package containerd

import (
    "context"
    "errors"
    "fmt"
    "io"
    "os"
    "runtime"
    "time"

    "github.com/containerd/containerd"
    cerrdefs "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/leases"
    "github.com/containerd/containerd/mount"
    "github.com/containerd/containerd/platforms"
    "github.com/containerd/containerd/rootfs"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types/backend"
    imagetypes "github.com/docker/docker/api/types/image"
    "github.com/docker/docker/api/types/registry"
    registrypkg "github.com/docker/docker/registry"

    // "github.com/docker/docker/api/types/container"
    containerdimages "github.com/containerd/containerd/images"
    "github.com/docker/docker/api/types/image"
    "github.com/docker/docker/builder"
    "github.com/docker/docker/errdefs"
    dimage "github.com/docker/docker/image"
    "github.com/docker/docker/layer"
    "github.com/docker/docker/pkg/progress"
    "github.com/docker/docker/pkg/streamformatter"
    "github.com/docker/docker/pkg/stringid"
    "github.com/docker/docker/pkg/system"
    "github.com/opencontainers/go-digest"
    "github.com/opencontainers/image-spec/identity"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/sirupsen/logrus"
)

// GetImageAndReleasableLayer returns an image and releaseable layer for a
// reference or ID. Every call to GetImageAndReleasableLayer MUST call
// releasableLayer.Release() to prevent leaking of layers.
func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) {
    return nil, nil, errdefs.NotImplemented(errors.New("not implemented"))
    if refOrID == "" { // from SCRATCH
        os := runtime.GOOS
        if runtime.GOOS == "windows" {
            os = "linux"
        }
        if opts.Platform != nil {
            os = opts.Platform.OS
        }
        if !system.IsOSSupported(os) {
            return nil, nil, system.ErrNotSupportedOperatingSystem
        }
        return nil, &rolayer{
            key:         "",
            c:           i.client,
            snapshotter: i.snapshotter,
            diffID:      "",
            root:        "",
        }, nil
    }

    if opts.PullOption != backend.PullOptionForcePull {
        // TODO(laurazard): same as below
        img, err := i.GetImage(ctx, refOrID, image.GetImageOpts{Platform: opts.Platform})
        if err != nil && opts.PullOption == backend.PullOptionNoPull {
            return nil, nil, err
        }
        imgDesc, err := i.resolveDescriptor(ctx, refOrID)
        if err != nil && !errdefs.IsNotFound(err) {
            return nil, nil, err
        }
        if img != nil {
            if !system.IsOSSupported(img.OperatingSystem()) {
                return nil, nil, system.ErrNotSupportedOperatingSystem
            }

            layer, err := newROLayerForImage(ctx, &imgDesc, i, opts, refOrID, opts.Platform)
            if err != nil {
                return nil, nil, err
            }

            return img, layer, nil
        }
    }

    ctx, _, err := i.client.WithLease(ctx, leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
    if err != nil {
        return nil, nil, fmt.Errorf("failed to create lease for commit: %w", err)
    }

    // TODO(laurazard): do we really need a new method here to pull the image?
    imgDesc, err := i.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.Platform)
    if err != nil {
        return nil, nil, err
    }

    // TODO(laurazard): pullForBuilder should return whatever we
    // need here instead of having to go and get it again
    img, err := i.GetImage(ctx, refOrID, imagetypes.GetImageOpts{
        Platform: opts.Platform,
    })
    if err != nil {
        return nil, nil, err
    }

    layer, err := newROLayerForImage(ctx, imgDesc, i, opts, refOrID, opts.Platform)
    if err != nil {
        return nil, nil, err
    }

    return img, layer, nil
}

func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]registry.AuthConfig, output io.Writer, platform *ocispec.Platform) (*ocispec.Descriptor, error) {
    ref, err := reference.ParseNormalizedNamed(name)
    if err != nil {
        return nil, err
    }
    taggedRef := reference.TagNameOnly(ref)

    pullRegistryAuth := &registry.AuthConfig{}
    if len(authConfigs) > 0 {
        // The request came with a full auth config, use it
        repoInfo, err := i.registryService.ResolveRepository(ref)
        if err != nil {
            return nil, err
        }

        resolvedConfig := registrypkg.ResolveAuthConfig(authConfigs, repoInfo.Index)
        pullRegistryAuth = &resolvedConfig
    }

    if err := i.PullImage(ctx, ref.Name(), taggedRef.(reference.NamedTagged).Tag(), platform, nil, pullRegistryAuth, output); err != nil {
        return nil, err
    }

    img, err := i.GetImage(ctx, name, imagetypes.GetImageOpts{Platform: platform})
    if err != nil {
        if errdefs.IsNotFound(err) && img != nil && platform != nil {
            imgPlat := ocispec.Platform{
                OS:           img.OS,
                Architecture: img.BaseImgArch(),
                Variant:      img.BaseImgVariant(),
            }

            p := *platform
            if !platforms.Only(p).Match(imgPlat) {
                po := streamformatter.NewJSONProgressOutput(output, false)
                progress.Messagef(po, "", `
WARNING: Pulled image with specified platform (%s), but the resulting image's configured platform (%s) does not match.
This is most likely caused by a bug in the build system that created the fetched image (%s).
Please notify the image author to correct the configuration.`,
                    platforms.Format(p), platforms.Format(imgPlat), name,
                )
                logrus.WithError(err).WithField("image", name).Warn("Ignoring error about platform mismatch where the manifest list points to an image whose configuration does not match the platform in the manifest.")
            }
        } else {
            return nil, err
        }
    }

    if !system.IsOSSupported(img.OperatingSystem()) {
        return nil, system.ErrNotSupportedOperatingSystem
    }

    imgDesc, err := i.resolveDescriptor(ctx, name)
    if err != nil {
        return nil, err
    }

    return &imgDesc, err
}

func newROLayerForImage(ctx context.Context, imgDesc *ocispec.Descriptor, i *ImageService, opts backend.GetImageAndLayerOptions, refOrID string, platform *ocispec.Platform) (builder.ROLayer, error) {
    if imgDesc == nil {
        return nil, fmt.Errorf("can't make an RO layer for a nil image :'(")
    }

    platMatcher := platforms.Default()
    if platform != nil {
        platMatcher = platforms.Only(*platform)
    }

    // this needs it's own context + lease so that it doesn't get cleaned before we're ready
    confDesc, err := containerdimages.Config(ctx, i.client.ContentStore(), *imgDesc, platMatcher)
    if err != nil {
        return nil, err
    }

    diffIDs, err := containerdimages.RootFS(ctx, i.client.ContentStore(), confDesc)
    if err != nil {
        return nil, err
    }
    parent := identity.ChainID(diffIDs).String()

    s := i.client.SnapshotService(i.snapshotter)
    key := stringid.GenerateRandomID()
    ctx, _, err = i.client.WithLease(ctx, leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
    if err != nil {
        return nil, fmt.Errorf("failed to create lease for commit: %w", err)
    }
    mounts, err := s.View(ctx, key, parent)
    if err != nil {
        return nil, err
    }

    tempMountLocation := os.TempDir()
    root, err := os.MkdirTemp(tempMountLocation, "rootfs-mount")
    if err != nil {
        return nil, err
    }

    if err := mount.All(mounts, root); err != nil {
        return nil, err
    }

    return &rolayer{
        key:                key,
        c:                  i.client,
        snapshotter:        i.snapshotter,
        diffID:             digest.Digest(parent),
        root:               root,
        contentStoreDigest: "",
    }, nil
}

type rolayer struct {
    key                string
    c                  *containerd.Client
    snapshotter        string
    diffID             digest.Digest
    root               string
    contentStoreDigest digest.Digest
}

func (rl *rolayer) ContentStoreDigest() digest.Digest {
    return rl.contentStoreDigest
}

func (rl *rolayer) DiffID() layer.DiffID {
    if rl.diffID == "" {
        return layer.DigestSHA256EmptyTar
    }
    return layer.DiffID(rl.diffID)
}

func (rl *rolayer) Release() error {
    snapshotter := rl.c.SnapshotService(rl.snapshotter)
    err := snapshotter.Remove(context.TODO(), rl.key)
    if err != nil && !cerrdefs.IsNotFound(err) {
        return err
    }

    if rl.root == "" { // nothing to release
        return nil
    }
    if err := mount.UnmountAll(rl.root, 0); err != nil {
        logrus.WithError(err).WithField("root", rl.root).Error("failed to unmount ROLayer")
        return err
    }
    if err := os.Remove(rl.root); err != nil {
        logrus.WithError(err).WithField("dir", rl.root).Error("failed to remove mount temp dir")
        return err
    }
    rl.root = ""
    return nil
}

// NewRWLayer creates a new read-write layer for the builder
func (rl *rolayer) NewRWLayer() (builder.RWLayer, error) {
    snapshotter := rl.c.SnapshotService(rl.snapshotter)

    // we need this here for the prepared snapshots or
    // we'll have racy behaviour where sometimes they
    // will get GC'd before we commit/use them
    ctx, _, err := rl.c.WithLease(context.TODO(), leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
    if err != nil {
        return nil, fmt.Errorf("failed to create lease for commit: %w", err)
    }

    key := stringid.GenerateRandomID()
    mounts, err := snapshotter.Prepare(ctx, key, rl.diffID.String())
    if err != nil {
        return nil, err
    }

    root, err := os.MkdirTemp(os.TempDir(), "rootfs-mount")
    if err != nil {
        return nil, err
    }
    if err := mount.All(mounts, root); err != nil {
        return nil, err
    }

    return &rwlayer{
        key:         key,
        parent:      rl.key,
        c:           rl.c,
        snapshotter: rl.snapshotter,
        root:        root,
    }, nil
}

type rwlayer struct {
    key         string
    parent      string
    c           *containerd.Client
    snapshotter string
    root        string
}

func (rw *rwlayer) Root() string {
    return rw.root
}

func (rw *rwlayer) Commit() (builder.ROLayer, error) {
    // we need this here for the prepared snapshots or
    // we'll have racy behaviour where sometimes they
    // will get GC'd before we commit/use them
    ctx, _, err := rw.c.WithLease(context.TODO(), leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
    if err != nil {
        return nil, fmt.Errorf("failed to create lease for commit: %w", err)
    }
    snapshotter := rw.c.SnapshotService(rw.snapshotter)

    key := stringid.GenerateRandomID()
    err = snapshotter.Commit(ctx, key, rw.key)
    if err != nil && !cerrdefs.IsAlreadyExists(err) {
        return nil, err
    }

    differ := rw.c.DiffService()
    desc, err := rootfs.CreateDiff(ctx, key, snapshotter, differ)
    if err != nil {
        return nil, err
    }
    info, err := rw.c.ContentStore().Info(ctx, desc.Digest)
    if err != nil {
        return nil, err
    }
    diffIDStr, ok := info.Labels["containerd.io/uncompressed"]
    if !ok {
        return nil, fmt.Errorf("invalid differ response with no diffID")
    }
    diffID, err := digest.Parse(diffIDStr)
    if err != nil {
        return nil, err
    }

    return &rolayer{
        key:                key,
        c:                  rw.c,
        snapshotter:        rw.snapshotter,
        diffID:             diffID,
        root:               "",
        contentStoreDigest: desc.Digest,
    }, nil
}

func (rw *rwlayer) Release() error {
    snapshotter := rw.c.SnapshotService(rw.snapshotter)
    err := snapshotter.Remove(context.TODO(), rw.key)
    if err != nil && !cerrdefs.IsNotFound(err) {
        return err
    }

    if rw.root == "" { // nothing to release
        return nil
    }
    if err := mount.UnmountAll(rw.root, 0); err != nil {
        logrus.WithError(err).WithField("root", rw.root).Error("failed to unmount ROLayer")
        return err
    }
    if err := os.Remove(rw.root); err != nil {
        logrus.WithError(err).WithField("dir", rw.root).Error("failed to remove mount temp dir")
        return err
    }
    rw.root = ""
    return nil
}

// CreateImage creates a new image by adding a config and ID to the image store.
// This is similar to LoadImage() except that it receives JSON encoded bytes of
// an image instead of a tar archive.
func (i *ImageService) CreateImage(config []byte, parent string) (builder.Image, error) {
    return nil, errdefs.NotImplemented(errors.New("not implemented"))
func (i *ImageService) CreateImage(ctx context.Context, config []byte, parent string, layerDigest digest.Digest) (builder.Image, error) {
    imgToCreate, err := dimage.NewFromJSON(config)
    if err != nil {
        return nil, err
    }

    rootfs := ocispec.RootFS{
        Type:    imgToCreate.RootFS.Type,
        DiffIDs: []digest.Digest{},
    }
    for _, diffId := range imgToCreate.RootFS.DiffIDs {
        rootfs.DiffIDs = append(rootfs.DiffIDs, digest.Digest(diffId))
    }
    exposedPorts := make(map[string]struct{}, len(imgToCreate.Config.ExposedPorts))
    for k, v := range imgToCreate.Config.ExposedPorts {
        exposedPorts[string(k)] = v
    }

    var ociHistory []ocispec.History
    for _, history := range imgToCreate.History {
        created := history.Created
        ociHistory = append(ociHistory, ocispec.History{
            Created:    &created,
            CreatedBy:  history.CreatedBy,
            Author:     history.Author,
            Comment:    history.Comment,
            EmptyLayer: history.EmptyLayer,
        })
    }

    // make an ocispec.Image from the docker/image.Image
    ociImgToCreate := ocispec.Image{
        Created:      &imgToCreate.Created,
        Author:       imgToCreate.Author,
        Architecture: imgToCreate.Architecture,
        Variant:      imgToCreate.Variant,
        OS:           imgToCreate.OS,
        OSVersion:    imgToCreate.OSVersion,
        OSFeatures:   imgToCreate.OSFeatures,
        Config: ocispec.ImageConfig{
            User:         imgToCreate.Config.User,
            ExposedPorts: exposedPorts,
            Env:          imgToCreate.Config.Env,
            Entrypoint:   imgToCreate.Config.Entrypoint,
            Cmd:          imgToCreate.Config.Cmd,
            Volumes:      imgToCreate.Config.Volumes,
            WorkingDir:   imgToCreate.Config.WorkingDir,
            Labels:       imgToCreate.Config.Labels,
            StopSignal:   imgToCreate.Config.StopSignal,
        },
        RootFS:  rootfs,
        History: ociHistory,
    }

    var layers []ocispec.Descriptor
    // if the image has a parent, we need to start with the parents layers descriptors
    if parent != "" {
        parentDesc, err := i.resolveDescriptor(ctx, parent)
        if err != nil {
            return nil, err
        }
        parentImageManifest, err := containerdimages.Manifest(ctx, i.client.ContentStore(), parentDesc, platforms.Default())
        if err != nil {
            return nil, err
        }

        layers = parentImageManifest.Layers
    }

    // get the info for the new layers
    info, err := i.client.ContentStore().Info(ctx, layerDigest)
    if err != nil {
        return nil, err
    }

    // append the new layer descriptor
    layers = append(layers,
        ocispec.Descriptor{
            MediaType: containerdimages.MediaTypeDockerSchema2LayerGzip,
            Digest:    layerDigest,
            Size:      info.Size,
        },
    )

    // necessary to prevent the contents from being GC'd
    // between writing them here and creating an image
    ctx, done, err := i.client.WithLease(ctx, leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
    if err != nil {
        return nil, err
    }
    defer done(ctx)

    commitManifestDesc, err := writeContentsForImage(ctx, i.snapshotter, i.client.ContentStore(), ociImgToCreate, layers)
    if err != nil {
        return nil, err
    }

    // image create
    img := containerdimages.Image{
        Name:      danglingImageName(commitManifestDesc.Digest),
        Target:    commitManifestDesc,
        CreatedAt: time.Now(),
    }

    createdImage, err := i.client.ImageService().Update(ctx, img)
    if err != nil {
        if !cerrdefs.IsNotFound(err) {
            return nil, err
        }

        if createdImage, err = i.client.ImageService().Create(ctx, img); err != nil {
            return nil, fmt.Errorf("failed to create new image: %w", err)
        }
    }

    if err := i.unpackImage(ctx, createdImage, platforms.DefaultSpec()); err != nil {
        return nil, err
    }

    newImage := dimage.NewImage(dimage.ID(createdImage.Target.Digest))
    newImage.V1Image = imgToCreate.V1Image
    newImage.V1Image.ID = string(createdImage.Target.Digest)
    newImage.History = imgToCreate.History
    return newImage, nil
}
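The `WithLease(..., leases.WithRandomID(), leases.WithExpiration(1*time.Hour))` calls that recur above are what keeps freshly prepared snapshots and freshly written blobs from being garbage-collected before the build step that needs them runs. A standalone sketch of that pattern, using only the containerd client API already imported in this file:

```go
package example

import (
    "context"
    "time"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/leases"
)

// withBuildLease runs fn under a short-lived lease so that any content or
// snapshots it creates are protected from containerd's garbage collector
// until the lease is released (or its one-hour expiration passes).
func withBuildLease(ctx context.Context, client *containerd.Client, fn func(ctx context.Context) error) error {
    ctx, done, err := client.WithLease(ctx, leases.WithRandomID(), leases.WithExpiration(1*time.Hour))
    if err != nil {
        return err
    }
    defer done(ctx) // drops the lease; GC may then collect unreferenced content
    return fn(ctx)
}
```

Note that GetImageAndReleasableLayer and the rolayer/rwlayer methods above discard the returned release function (`ctx, _, err := ...WithLease(...)`) and rely on the expiration instead, while CreateImage keeps it and defers done(ctx).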
@@ -127,3 +127,71 @@ func isRootfsChildOf(child ocispec.RootFS, parent ocispec.RootFS) bool {

    return true
}

// parents returns a slice of image IDs whose entire rootfs contents match,
// in order, the childs first layers, excluding images with the exact same
// rootfs.
//
// Called from image_delete.go to prune dangling parents.
func (i *ImageService) parents(ctx context.Context, id image.ID) ([]imageWithRootfs, error) {
    target, err := i.resolveDescriptor(ctx, id.String())
    if err != nil {
        return nil, errors.Wrap(err, "failed to get child image")
    }

    cs := i.client.ContentStore()

    allPlatforms, err := containerdimages.Platforms(ctx, cs, target)
    if err != nil {
        return nil, errdefs.System(errors.Wrap(err, "failed to list platforms supported by image"))
    }

    var childRootFS []ocispec.RootFS
    for _, platform := range allPlatforms {
        rootfs, err := platformRootfs(ctx, cs, target, platform)
        if err != nil {
            if cerrdefs.IsNotFound(err) {
                continue
            }
            return nil, errdefs.System(errors.Wrap(err, "failed to get platform-specific rootfs"))
        }

        childRootFS = append(childRootFS, rootfs)
    }

    imgs, err := i.client.ImageService().List(ctx)
    if err != nil {
        return nil, errdefs.System(errors.Wrap(err, "failed to list all images"))
    }

    var parents []imageWithRootfs
    for _, img := range imgs {
    nextImage:
        for _, platform := range allPlatforms {
            rootfs, err := platformRootfs(ctx, cs, img.Target, platform)
            if err != nil {
                if cerrdefs.IsNotFound(err) {
                    continue
                }
                return nil, errdefs.System(errors.Wrap(err, "failed to get platform-specific rootfs"))
            }

            for _, childRoot := range childRootFS {
                if isRootfsChildOf(childRoot, rootfs) {
                    parents = append(parents, imageWithRootfs{
                        img:    img,
                        rootfs: rootfs,
                    })
                    break nextImage
                }
            }
        }
    }

    return parents, nil
}

type imageWithRootfs struct {
    img    containerdimages.Image
    rootfs ocispec.RootFS
}
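The doc comment states the parent relation precisely: a candidate is a parent when its rootfs diff IDs match, in order, the first layers of the child, excluding an identical rootfs. A minimal illustration of that strict-prefix rule (hypothetical helper; the real predicate is isRootfsChildOf above):

```go
package example

import (
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// isStrictPrefixRootfs reports whether parent's diff IDs are a strict prefix
// of child's, i.e. the child was built by stacking at least one extra layer
// on top of the parent's rootfs.
func isStrictPrefixRootfs(child, parent ocispec.RootFS) bool {
    if len(parent.DiffIDs) >= len(child.DiffIDs) {
        return false // identical or larger rootfs is not a parent
    }
    for i, d := range parent.DiffIDs {
        if child.DiffIDs[i] != d {
            return false
        }
    }
    return true
}
```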
@@ -6,9 +6,9 @@ import (
    "crypto/rand"
    "encoding/base64"
    "encoding/json"
    "errors"
    "fmt"
    "runtime"
    "strings"
    "time"

    "github.com/containerd/containerd/content"

@@ -19,7 +19,6 @@ import (
    "github.com/containerd/containerd/rootfs"
    "github.com/containerd/containerd/snapshots"
    "github.com/docker/docker/api/types/backend"
    "github.com/docker/docker/errdefs"
    "github.com/docker/docker/image"
    "github.com/opencontainers/go-digest"
    "github.com/opencontainers/image-spec/identity"

@@ -142,10 +141,11 @@ func generateCommitImageConfig(baseConfig ocispec.Image, diffID digest.Digest, o
            DiffIDs: append(baseConfig.RootFS.DiffIDs, diffID),
        },
        History: append(baseConfig.History, ocispec.History{
            Created:   &createdTime,
            CreatedBy: "", // FIXME(ndeloof) ?
            Author:    opts.Author,
            Comment:   opts.Comment,
            Created:   &createdTime,
            CreatedBy: strings.Join(opts.ContainerConfig.Cmd, " "),
            Author:    opts.Author,
            Comment:   opts.Comment,
            // TODO(laurazard): this check might be incorrect
            EmptyLayer: diffID == "",
        }),
    }

@@ -297,5 +297,13 @@ func uniquePart() string {
//
// This is a temporary shim. Should be removed when builder stops using commit.
func (i *ImageService) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) {
    return "", errdefs.NotImplemented(errors.New("not implemented"))
    ctr := i.containers.Get(c.ContainerID)
    if ctr == nil {
        // TODO: use typed error
        return "", fmt.Errorf("container not found: %s", c.ContainerID)
    }
    c.ContainerMountLabel = ctr.MountLabel
    c.ContainerOS = ctr.OS
    c.ParentImageID = string(ctr.ImageID)
    return i.CommitImage(ctx, c)
}
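The switch from an empty CreatedBy to strings.Join(opts.ContainerConfig.Cmd, " ") matters beyond cosmetics: imageCache.isMatch in daemon/containerd/cache.go compares exactly that string against the candidate config's Cmd, so history entries written here become usable build-cache entries. A tiny illustration of the recorded value (plain Go, no daemon types):

```go
package main

import (
    "fmt"
    "strings"
)

func main() {
    cmd := []string{"/bin/sh", "-c", "apt-get update"}
    // The value stored in the new history entry's CreatedBy field, and the
    // value the build cache later compares against.
    fmt.Println(strings.Join(cmd, " ")) // "/bin/sh -c apt-get update"
}
```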
@@ -2,10 +2,16 @@ package containerd

import (
    "context"
    "fmt"
    "sort"
    "strings"

    "github.com/containerd/containerd/images"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types"
    "github.com/docker/docker/container"
    "github.com/docker/docker/image"
    "github.com/docker/docker/pkg/stringid"
    "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/sirupsen/logrus"

@@ -30,8 +36,6 @@ import (
// are divided into two categories grouped by their severity:
//
// Hard Conflict:
// - a pull or build using the image.
// - any descendant image.
// - any running container using the image.
//
// Soft Conflict:

@@ -45,8 +49,6 @@ import (
// meaning any delete conflicts will cause the image to not be deleted and the
// conflict will not be reported.
//
// TODO(thaJeztah): implement ImageDelete "force" options; see https://github.com/moby/moby/issues/43850
// TODO(thaJeztah): implement ImageDelete "prune" options; see https://github.com/moby/moby/issues/43849
// TODO(thaJeztah): image delete should send prometheus counters; see https://github.com/moby/moby/issues/45268
func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) {
    parsedRef, err := reference.ParseNormalizedNamed(imageRef)

@@ -59,28 +61,278 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force,
        return nil, err
    }

    possiblyDeletedConfigs := map[digest.Digest]struct{}{}
    if err := i.walkPresentChildren(ctx, img.Target, func(_ context.Context, d ocispec.Descriptor) {
        if images.IsConfigType(d.MediaType) {
            possiblyDeletedConfigs[d.Digest] = struct{}{}
        }
    }); err != nil {
        return nil, err
    imgID := image.ID(img.Target.Digest)

    if isImageIDPrefix(imgID.String(), imageRef) {
        return i.deleteAll(ctx, img, force, prune)
    }

    err = i.client.ImageService().Delete(ctx, img.Name, images.SynchronousDelete())
    singleRef, err := i.isSingleReference(ctx, img)
    if err != nil {
        return nil, err
    }

    // Workaround for: https://github.com/moby/buildkit/issues/3797
    if err := i.unleaseSnapshotsFromDeletedConfigs(context.Background(), possiblyDeletedConfigs); err != nil {
        logrus.WithError(err).Warn("failed to unlease snapshots")
    if !singleRef {
        err := i.client.ImageService().Delete(ctx, img.Name)
        if err != nil {
            return nil, err
        }
        i.LogImageEvent(imgID.String(), imgID.String(), "untag")
        records := []types.ImageDeleteResponseItem{{Untagged: reference.FamiliarString(reference.TagNameOnly(parsedRef))}}
        return records, nil
    }

    imgID := string(img.Target.Digest)
    i.LogImageEvent(imgID, imgID, "untag")
    i.LogImageEvent(imgID, imgID, "delete")
    using := func(c *container.Container) bool {
        return c.ImageID == imgID
    }
    ctr := i.containers.First(using)
    if ctr != nil {
        if !force {
            // If we removed the repository reference then
            // this image would remain "dangling" and since
            // we really want to avoid that the client must
            // explicitly force its removal.
            refString := reference.FamiliarString(reference.TagNameOnly(parsedRef))
            err := &imageDeleteConflict{
                reference: refString,
                used:      true,
                message: fmt.Sprintf("container %s is using its referenced image %s",
                    stringid.TruncateID(ctr.ID),
                    stringid.TruncateID(imgID.String())),
            }
            return nil, err
        }

        return []types.ImageDeleteResponseItem{{Untagged: reference.FamiliarString(parsedRef)}}, nil
        err := i.softImageDelete(ctx, img)
        if err != nil {
            return nil, err
        }

        i.LogImageEvent(imgID.String(), imgID.String(), "untag")
        records := []types.ImageDeleteResponseItem{{Untagged: reference.FamiliarString(reference.TagNameOnly(parsedRef))}}
        return records, nil
    }

    return i.deleteAll(ctx, img, force, prune)
}

// deleteAll deletes the image from the daemon, and if prune is true,
// also deletes dangling parents if there is no conflict in doing so.
// Parent images are removed quietly, and if there is any issue/conflict
// it is logged but does not halt execution/an error is not returned.
func (i *ImageService) deleteAll(ctx context.Context, img images.Image, force, prune bool) ([]types.ImageDeleteResponseItem, error) {
    var records []types.ImageDeleteResponseItem

    // Workaround for: https://github.com/moby/buildkit/issues/3797
    possiblyDeletedConfigs := map[digest.Digest]struct{}{}
    err := i.walkPresentChildren(ctx, img.Target, func(_ context.Context, d ocispec.Descriptor) {
        if images.IsConfigType(d.MediaType) {
            possiblyDeletedConfigs[d.Digest] = struct{}{}
        }
    })
    if err != nil {
        return nil, err
    }
    defer func() {
        if err := i.unleaseSnapshotsFromDeletedConfigs(context.Background(), possiblyDeletedConfigs); err != nil {
            logrus.WithError(err).Warn("failed to unlease snapshots")
        }
    }()

    imgID := img.Target.Digest.String()

    var parents []imageWithRootfs
    if prune {
        parents, err = i.parents(ctx, image.ID(imgID))
        if err != nil {
            logrus.WithError(err).Warn("failed to get image parents")
        }
        sortParentsByAffinity(parents)
    }

    imageRefs, err := i.client.ImageService().List(ctx, "target.digest=="+imgID)
    if err != nil {
        return nil, err
    }
    for _, imageRef := range imageRefs {
        if err := i.imageDeleteHelper(ctx, imageRef, &records, force); err != nil {
            return records, err
        }
    }
    i.LogImageEvent(imgID, imgID, "delete")
    records = append(records, types.ImageDeleteResponseItem{Deleted: imgID})

    for _, parent := range parents {
        if !isDanglingImage(parent.img) {
            break
        }
        err = i.imageDeleteHelper(ctx, parent.img, &records, false)
        if err != nil {
            logrus.WithError(err).Warn("failed to remove image parent")
            break
        }
        parentID := parent.img.Target.Digest.String()
        i.LogImageEvent(parentID, parentID, "delete")
        records = append(records, types.ImageDeleteResponseItem{Deleted: parentID})
    }

    return records, nil
}

// isImageIDPrefix returns whether the given
// possiblePrefix is a prefix of the given imageID.
func isImageIDPrefix(imageID, possiblePrefix string) bool {
    if strings.HasPrefix(imageID, possiblePrefix) {
        return true
    }
    if i := strings.IndexRune(imageID, ':'); i >= 0 {
        return strings.HasPrefix(imageID[i+1:], possiblePrefix)
    }
    return false
}

func sortParentsByAffinity(parents []imageWithRootfs) {
    sort.Slice(parents, func(i, j int) bool {
        lenRootfsI := len(parents[i].rootfs.DiffIDs)
        lenRootfsJ := len(parents[j].rootfs.DiffIDs)
        if lenRootfsI == lenRootfsJ {
            return isDanglingImage(parents[i].img)
        }
        return lenRootfsI > lenRootfsJ
    })
}

// isSingleReference returns true if there are no other images in the
// daemon targeting the same content as `img` that are not dangling.
func (i *ImageService) isSingleReference(ctx context.Context, img images.Image) (bool, error) {
    refs, err := i.client.ImageService().List(ctx, "target.digest=="+img.Target.Digest.String())
    if err != nil {
        return false, err
    }
    for _, ref := range refs {
        if !isDanglingImage(ref) && ref.Name != img.Name {
            return false, nil
        }
    }
    return true, nil
}

type conflictType int

const (
    conflictRunningContainer conflictType = 1 << iota
    conflictActiveReference
    conflictStoppedContainer
    conflictHard = conflictRunningContainer
    conflictSoft = conflictActiveReference | conflictStoppedContainer
)

// imageDeleteHelper attempts to delete the given image from this daemon.
// If the image has any hard delete conflicts (running containers using
// the image) then it cannot be deleted. If the image has any soft delete
// conflicts (any tags/digests referencing the image or any stopped container
// using the image) then it can only be deleted if force is true. Any deleted
// images and untagged references are appended to the given records. If any
// error or conflict is encountered, it will be returned immediately without
// deleting the image.
func (i *ImageService) imageDeleteHelper(ctx context.Context, img images.Image, records *[]types.ImageDeleteResponseItem, force bool) error {
    // First, determine if this image has any conflicts. Ignore soft conflicts
    // if force is true.
    c := conflictHard
    if !force {
        c |= conflictSoft
    }

    imgID := image.ID(img.Target.Digest)

    err := i.checkImageDeleteConflict(ctx, imgID, c)
    if err != nil {
        return err
    }

    untaggedRef, err := reference.ParseAnyReference(img.Name)
    if err != nil {
        return err
    }
    err = i.client.ImageService().Delete(ctx, img.Name, images.SynchronousDelete())
    if err != nil {
        return err
    }

    i.LogImageEvent(imgID.String(), imgID.String(), "untag")
    *records = append(*records, types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(untaggedRef)})

    return nil
}

// ImageDeleteConflict holds a soft or hard conflict and associated
// error. A hard conflict represents a running container using the
// image, while a soft conflict is any tags/digests referencing the
// given image or any stopped container using the image.
// Implements the error interface.
type imageDeleteConflict struct {
    hard      bool
    used      bool
    reference string
    message   string
}

func (idc *imageDeleteConflict) Error() string {
    var forceMsg string
    if idc.hard {
        forceMsg = "cannot be forced"
    } else {
        forceMsg = "must be forced"
    }
    return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", idc.reference, forceMsg, idc.message)
}

func (imageDeleteConflict) Conflict() {}

// checkImageDeleteConflict returns a conflict representing
// any issue preventing deletion of the given image ID, and
// nil if there are none. It takes a bitmask representing a
// filter for which conflict types the caller cares about,
// and will only check for these conflict types.
func (i *ImageService) checkImageDeleteConflict(ctx context.Context, imgID image.ID, mask conflictType) error {
    if mask&conflictRunningContainer != 0 {
        running := func(c *container.Container) bool {
            return c.ImageID == imgID && c.IsRunning()
        }
        if ctr := i.containers.First(running); ctr != nil {
            return &imageDeleteConflict{
                reference: stringid.TruncateID(imgID.String()),
                hard:      true,
                used:      true,
                message:   fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(ctr.ID)),
            }
        }
    }

    if mask&conflictStoppedContainer != 0 {
        stopped := func(c *container.Container) bool {
            return !c.IsRunning() && c.ImageID == imgID
        }
        if ctr := i.containers.First(stopped); ctr != nil {
            return &imageDeleteConflict{
                reference: stringid.TruncateID(imgID.String()),
                used:      true,
                message:   fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(ctr.ID)),
            }
        }
    }

    if mask&conflictActiveReference != 0 {
        refs, err := i.client.ImageService().List(ctx, "target.digest=="+imgID.String())
        if err != nil {
            return err
        }
        if len(refs) > 1 {
            return &imageDeleteConflict{
                reference: stringid.TruncateID(imgID.String()),
                message:   "image is referenced in multiple repositories",
            }
        }
    }

    return nil
}
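Two small helpers above carry most of the new deletion logic: isImageIDPrefix decides whether the caller passed an image ID (or ID prefix) rather than a tag, and the conflictType bitmask decides which conflicts block deletion depending on force. A test-style sketch of both (hypothetical; assumes same-package access to the unexported identifiers):

```go
package containerd

import "testing"

func TestDeleteHelpersSketch(t *testing.T) {
    // An ID is matched with or without the algorithm prefix.
    id := "sha256:1234567890abcdef"
    if !isImageIDPrefix(id, "sha256:1234") || !isImageIDPrefix(id, "1234567890") {
        t.Fatal("expected both forms to be treated as an ID prefix")
    }
    if isImageIDPrefix(id, "abcdef") {
        t.Fatal("a substring that is not a prefix of the hex digest must not match")
    }

    // force=false checks hard and soft conflicts; force=true checks only hard ones.
    mask := conflictHard
    force := false
    if !force {
        mask |= conflictSoft
    }
    if mask&conflictRunningContainer == 0 || mask&conflictStoppedContainer == 0 {
        t.Fatal("without force, both running- and stopped-container conflicts apply")
    }
}
```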
@@ -90,13 +90,16 @@ func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imaget
        return nil, err
    }

    tags := make([]string, len(tagged))
    for i, t := range tagged {
    var tags []string
    for _, t := range tagged {
        if isDanglingImage(t) {
            continue
        }
        name, err := reference.ParseNamed(t.Name)
        if err != nil {
            return nil, err
        }
        tags[i] = reference.FamiliarString(name)
        tags = append(tags, reference.FamiliarString(name))
    }
    history[0].Tags = tags
}
@@ -24,7 +24,12 @@ func (i *ImageService) newResolverFromAuthConfig(authConfig *registrytypes.AuthC
    }), tracker
}

func hostsWrapper(hostsFn docker.RegistryHosts, authConfig *registrytypes.AuthConfig, regService RegistryConfigProvider) docker.RegistryHosts {
func hostsWrapper(hostsFn docker.RegistryHosts, optAuthConfig *registrytypes.AuthConfig, regService RegistryConfigProvider) docker.RegistryHosts {
    var authorizer docker.Authorizer
    if optAuthConfig != nil {
        authorizer = docker.NewDockerAuthorizer(authorizationCredsFromAuthConfig(*optAuthConfig))
    }

    return func(n string) ([]docker.RegistryHost, error) {
        hosts, err := hostsFn(n)
        if err != nil {

@@ -33,12 +38,7 @@ func hostsWrapper(hostsFn docker.RegistryHosts, authConfig *registrytypes.AuthCo

        for i := range hosts {
            if hosts[i].Authorizer == nil {
                var opts []docker.AuthorizerOpt
                if authConfig != nil {
                    opts = append(opts, authorizationCredsFromAuthConfig(*authConfig))
                }
                hosts[i].Authorizer = docker.NewDockerAuthorizer(opts...)

                hosts[i].Authorizer = authorizer
                isInsecure := regService.IsInsecureRegistry(hosts[i].Host)
                if hosts[i].Client.Transport != nil && isInsecure {
                    hosts[i].Client.Transport = httpFallback{super: hosts[i].Client.Transport}

@@ -51,13 +51,16 @@ func hostsWrapper(hostsFn docker.RegistryHosts, authConfig *registrytypes.AuthCo

func authorizationCredsFromAuthConfig(authConfig registrytypes.AuthConfig) docker.AuthorizerOpt {
    cfgHost := registry.ConvertToHostname(authConfig.ServerAddress)
    if cfgHost == registry.IndexHostname {
    if cfgHost == "" || cfgHost == registry.IndexHostname {
        cfgHost = registry.DefaultRegistryHost
    }

    return docker.WithAuthCreds(func(host string) (string, string, error) {
        if cfgHost != host {
            logrus.WithField("host", host).WithField("cfgHost", cfgHost).Warn("Host doesn't match")
            logrus.WithFields(logrus.Fields{
                "host":    host,
                "cfgHost": cfgHost,
            }).Warn("Host doesn't match")
            return "", "", nil
        }
        if authConfig.IdentityToken != "" {
@@ -10,6 +10,7 @@ import (
    "github.com/containerd/containerd/plugin"
    "github.com/containerd/containerd/remotes/docker"
    "github.com/containerd/containerd/snapshots"
    "github.com/docker/distribution/reference"
    imagetypes "github.com/docker/docker/api/types/image"
    "github.com/docker/docker/container"
    daemonevents "github.com/docker/docker/daemon/events"

@@ -17,6 +18,7 @@ import (
    "github.com/docker/docker/errdefs"
    "github.com/docker/docker/image"
    "github.com/docker/docker/layer"
    "github.com/docker/docker/registry"
    "github.com/opencontainers/go-digest"
    "github.com/opencontainers/image-spec/identity"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"

@@ -41,6 +43,7 @@ type RegistryHostsProvider interface {

type RegistryConfigProvider interface {
    IsInsecureRegistry(host string) bool
    ResolveRepository(name reference.Named) (*registry.RepositoryInfo, error)
}

type ImageServiceConfig struct {
@@ -9,7 +9,6 @@ import (
    "github.com/docker/docker/daemon/graphdriver"
    "github.com/docker/docker/daemon/graphdriver/graphtest"
    "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/reexec"
)

func init() {

@@ -17,8 +16,6 @@ func init() {
    // errors or hangs to be debugged directly from the test process.
    untar = archive.UntarUncompressed
    graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer

    reexec.Init()
}

// This avoids creating a new driver for each test if all tests are run
@@ -10,7 +10,6 @@ import (
    "github.com/docker/docker/daemon/graphdriver"
    "github.com/docker/docker/daemon/graphdriver/graphtest"
    "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/reexec"
)

func init() {

@@ -18,8 +17,6 @@ func init() {
    // errors or hangs to be debugged directly from the test process.
    untar = archive.UntarUncompressed
    graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer

    reexec.Init()
}

func skipIfNaive(t *testing.T) {
@@ -7,14 +7,8 @@ import (
    "testing"

    "github.com/docker/docker/daemon/graphdriver/graphtest"

    "github.com/docker/docker/pkg/reexec"
)

func init() {
    reexec.Init()
}

// This avoids creating a new driver for each test if all tests are run
// Make sure to put new tests between TestVfsSetup and TestVfsTeardown
func TestVfsSetup(t *testing.T) {
@@ -16,6 +16,7 @@ import (
    "github.com/docker/docker/image"
    "github.com/docker/docker/layer"
    "github.com/docker/docker/pkg/archive"
    "github.com/opencontainers/go-digest"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

@@ -27,7 +28,7 @@ type ImageService interface {

    PullImage(ctx context.Context, name, tag string, platform *v1.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
    PushImage(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error
    CreateImage(config []byte, parent string) (builder.Image, error)
    CreateImage(ctx context.Context, config []byte, parent string, contentStoreDigest digest.Digest) (builder.Image, error)
    ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error)
    ExportImage(ctx context.Context, names []string, outStream io.Writer) error
    PerformWithBaseFS(ctx context.Context, c *container.Container, fn func(string) error) error
@@ -19,6 +19,7 @@ import (
    "github.com/docker/docker/pkg/stringid"
    "github.com/docker/docker/pkg/system"
    registrypkg "github.com/docker/docker/registry"
    "github.com/opencontainers/go-digest"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"

@@ -30,6 +31,10 @@ type roLayer struct {
    roLayer layer.Layer
}

func (l *roLayer) ContentStoreDigest() digest.Digest {
    return ""
}

func (l *roLayer) DiffID() layer.DiffID {
    if l.roLayer == nil {
        return layer.DigestSHA256EmptyTar

@@ -241,7 +246,7 @@ func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID s
// CreateImage creates a new image by adding a config and ID to the image store.
// This is similar to LoadImage() except that it receives JSON encoded bytes of
// an image instead of a tar archive.
func (i *ImageService) CreateImage(config []byte, parent string) (builder.Image, error) {
func (i *ImageService) CreateImage(ctx context.Context, config []byte, parent string, _ digest.Digest) (builder.Image, error) {
    id, err := i.imageStore.Create(config)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to create image")

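Note: both the builder-facing interface and this graphdriver-backed implementation now take a context and a content-store digest; the graphdriver path ignores the digest (`_ digest.Digest`). A hedged caller-side sketch follows; the `imageCreator` interface and `createFromConfig` helper are illustrative only, not part of the codebase.

package example

import (
    "context"

    "github.com/docker/docker/builder"
    "github.com/opencontainers/go-digest"
)

// imageCreator captures only the method touched by this change; the real
// backend interface is much larger.
type imageCreator interface {
    CreateImage(ctx context.Context, config []byte, parent string, contentStoreDigest digest.Digest) (builder.Image, error)
}

// createFromConfig passes the zero digest, mirroring callers that have no
// content-store blob to reference (the graphdriver-backed image store).
func createFromConfig(ctx context.Context, svc imageCreator, config []byte, parent string) (builder.Image, error) {
    return svc.CreateImage(ctx, config, parent, "")
}
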
@@ -78,7 +78,8 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e *libcontaine
    }

    attributes := map[string]string{
        "exitCode": strconv.Itoa(exitStatus.ExitCode),
        "exitCode":     strconv.Itoa(exitStatus.ExitCode),
        "execDuration": strconv.Itoa(int(execDuration.Seconds())),
    }
    daemon.Cleanup(c)

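Note: the change above only adds an `execDuration` attribute alongside `exitCode` in the container-exit event. A tiny self-contained sketch of how those attribute strings are produced; the values are hypothetical.

package main

import (
    "fmt"
    "strconv"
    "time"
)

func main() {
    // hypothetical values standing in for exitStatus.ExitCode and execDuration
    exitCode := 137
    execDuration := 2500 * time.Millisecond

    attributes := map[string]string{
        "exitCode":     strconv.Itoa(exitCode),
        "execDuration": strconv.Itoa(int(execDuration.Seconds())), // truncates to whole seconds: "2"
    }
    fmt.Println(attributes)
}
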
@@ -59,6 +59,11 @@ variable "GITHUB_SHA" {
  default = ""
}

# Special target: https://github.com/docker/metadata-action#bake-definition
target "docker-metadata-action" {
  tags = ["moby-bin:local"]
}

# Defines the output folder
variable "DESTDIR" {
  default = ""

@@ -152,6 +157,29 @@ target "all-cross" {
  inherits = ["all", "_platforms"]
}

#
# bin image
#

target "bin-image" {
  inherits = ["all", "docker-metadata-action"]
  output = ["type=docker"]
}

target "bin-image-cross" {
  inherits = ["bin-image"]
  output = ["type=image"]
  platforms = [
    "linux/amd64",
    "linux/arm/v6",
    "linux/arm/v7",
    "linux/arm64",
    "linux/ppc64le",
    "linux/s390x",
    "windows/amd64"
  ]
}

#
# dev
#

@@ -23,9 +23,9 @@ keywords: "API, Docker, rcli, REST, documentation"
* `GET /images/json` no longer includes hardcoded `<none>:<none>` and
  `<none>@<none>` in `RepoTags` and `RepoDigests` for untagged images.
  In such cases, empty arrays will be produced instead.
* The `VirtualSize` field in the `GET /images/{name}/json` and `GET /images/json`
  responses is deprecated and will no longer be included in API v1.44. Use the
  `Size` field instead, which contains the same information.
* The `VirtualSize` field in the `GET /images/{name}/json`, `GET /images/json`,
  and `GET /system/df` responses is deprecated and will no longer be included
  in API v1.44. Use the `Size` field instead, which contains the same information.
* `GET /info` now includes `no-new-privileges` in the `SecurityOptions` string
  list when this option is enabled globally. This change is not versioned, and
  affects all API versions if the daemon has this patch.

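Note: for API consumers, the practical effect of the `VirtualSize` deprecation is to read `Size` instead. A minimal client-side sketch, assuming the standard `github.com/docker/docker/client` package; the program itself is illustrative, not part of this change.

package main

import (
    "context"
    "fmt"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
)

func main() {
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
    if err != nil {
        panic(err)
    }
    defer cli.Close()

    images, err := cli.ImageList(context.Background(), types.ImageListOptions{})
    if err != nil {
        panic(err)
    }
    for _, img := range images {
        // Size carries the same information VirtualSize used to; VirtualSize
        // will no longer be returned as of API v1.44.
        fmt.Printf("%s %d bytes\n", img.ID, img.Size)
    }
}
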
@@ -15,7 +15,7 @@ set -e
# the binary version you may also need to update the vendor version to pick up
# bug fixes or new APIs, however, usually the Go packages are built from a
# commit from the master branch.
: "${CONTAINERD_VERSION:=v1.7.0}"
: "${CONTAINERD_VERSION:=v1.7.1}"

install_containerd() (
    echo "Install containerd version $CONTAINERD_VERSION"

@@ -18,7 +18,6 @@ import (
    "github.com/docker/docker/integration-cli/daemon"
    "github.com/docker/docker/integration-cli/environment"
    "github.com/docker/docker/internal/test/suite"
    "github.com/docker/docker/pkg/reexec"
    testdaemon "github.com/docker/docker/testutil/daemon"
    ienv "github.com/docker/docker/testutil/environment"
    "github.com/docker/docker/testutil/fakestorage"

@@ -50,8 +49,6 @@ var (
func init() {
    var err error

    reexec.Init() // This is required for external graphdriver tests

    testEnv, err = environment.New()
    if err != nil {
        panic(err)

@@ -5,16 +5,12 @@ import (
    "os"
    "testing"

    "github.com/docker/docker/pkg/reexec"
    "github.com/docker/docker/testutil/environment"
)

var testEnv *environment.Execution

func TestMain(m *testing.M) {
    if reexec.Init() {
        return
    }
    var err error
    testEnv, err = environment.New()
    if err != nil {

@@ -5,7 +5,6 @@ import (
    "os"
    "testing"

    "github.com/docker/docker/pkg/reexec"
    "github.com/docker/docker/testutil/environment"
)

@@ -13,10 +12,6 @@ var (
    testEnv *environment.Execution
)

func init() {
    reexec.Init() // This is required for external graphdriver tests
}

func TestMain(m *testing.M) {
    var err error
    testEnv, err = environment.New()

@@ -8,14 +8,9 @@ import (
    "github.com/docker/docker/libnetwork/config"
    "github.com/docker/docker/libnetwork/netlabel"
    "github.com/docker/docker/libnetwork/options"
    "github.com/docker/docker/pkg/reexec"
)

func main() {
    if reexec.Init() {
        return
    }

    // Select and configure the network driver
    networkType := "bridge"

@@ -4,23 +4,14 @@
package bridge

import (
    "os"
    "testing"

    "github.com/docker/docker/libnetwork/netlabel"
    "github.com/docker/docker/libnetwork/ns"
    "github.com/docker/docker/libnetwork/testutils"
    "github.com/docker/docker/libnetwork/types"
    "github.com/docker/docker/pkg/reexec"
)

func TestMain(m *testing.M) {
    if reexec.Init() {
        return
    }
    os.Exit(m.Run())
}

func TestPortMappingConfig(t *testing.T) {
    defer testutils.SetupTestOSContext(t)()
    d := newDriver()

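Note: once `reexec.Init()` is gone, the `TestMain` above carries no logic of its own, which is why the whole function (and the now-unused `os` and `reexec` imports) can be removed rather than trimmed. A `TestMain` reduced to the sketch below is equivalent to having none at all.

// a TestMain like this is redundant: when no TestMain is defined, the testing
// package already runs m.Run() and exits with its status
func TestMain(m *testing.M) {
    os.Exit(m.Run())
}
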
@@ -10,7 +10,6 @@ import (
    "net/http/httptest"
    "os"
    "path/filepath"
    "runtime"
    "testing"

    "github.com/docker/docker/libnetwork"

@@ -23,19 +22,9 @@ import (
    "github.com/docker/docker/libnetwork/testutils"
    "github.com/docker/docker/libnetwork/types"
    "github.com/docker/docker/pkg/plugins"
    "github.com/docker/docker/pkg/reexec"
    "github.com/sirupsen/logrus"
)

func TestMain(m *testing.M) {
    if runtime.GOOS == "windows" {
        logrus.Info("Test suite does not currently support windows")
        os.Exit(0)
    }
    if reexec.Init() {
        return
    }

    // Cleanup local datastore file
    _ = os.Remove(datastore.DefaultScope("").Client.Address)

@@ -15,7 +15,6 @@ import (
    "github.com/docker/docker/libnetwork/ns"
    "github.com/docker/docker/libnetwork/testutils"
    "github.com/docker/docker/libnetwork/types"
    "github.com/docker/docker/pkg/reexec"
    "github.com/vishvananda/netlink"
    "github.com/vishvananda/netlink/nl"
    "github.com/vishvananda/netns"

@@ -382,13 +381,6 @@ func TestLiveRestore(t *testing.T) {
    }
}

func TestMain(m *testing.M) {
    if reexec.Init() {
        return
    }
    os.Exit(m.Run())
}

func TestSandboxCreate(t *testing.T) {
    defer testutils.SetupTestOSContext(t)()

@@ -13,6 +13,8 @@ const (
)

// GetVersion returns the major and minor version of apparmor_parser.
//
// Deprecated: no longer used, and will be removed in the next release.
func GetVersion() (int, error) {
    output, err := cmd("", "--version")
    if err != nil {

@@ -14,14 +14,9 @@ import (

    "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/reexec"
    "gotest.tools/v3/skip"
)

func init() {
    reexec.Init()
}

var chrootArchiver = NewArchiver(idtools.IdentityMapping{})

func TarUntar(src, dst string) error {

@@ -14,10 +14,8 @@ import (
    "github.com/docker/docker/pkg/aaparser"
)

var (
    // profileDirectory is the file store for apparmor profiles and macros.
    profileDirectory = "/etc/apparmor.d"
)
// profileDirectory is the file store for apparmor profiles and macros.
const profileDirectory = "/etc/apparmor.d"

// profileData holds information about the given profile for generation.
type profileData struct {

@@ -29,8 +27,6 @@ type profileData struct {
    Imports []string
    // InnerImports defines the apparmor functions to import in the profile.
    InnerImports []string
    // Version is the {major, minor, patch} version of apparmor_parser as a single number.
    Version int
}

// generateDefault creates an apparmor profile from ProfileData.

@@ -50,12 +46,6 @@ func (p *profileData) generateDefault(out io.Writer) error {
        p.InnerImports = append(p.InnerImports, "#include <abstractions/base>")
    }

    ver, err := aaparser.GetVersion()
    if err != nil {
        return err
    }
    p.Version = ver

    return compiled.Execute(out, p)
}

@@ -32,7 +32,7 @@ require (
    github.com/coreos/go-systemd/v22 v22.5.0
    github.com/creack/pty v1.1.18
    github.com/deckarep/golang-set/v2 v2.3.0
    github.com/docker/distribution v2.8.1+incompatible
    github.com/docker/distribution v2.8.2+incompatible
    github.com/docker/go-connections v0.4.0
    github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c
    github.com/docker/go-metrics v0.0.1

@@ -502,8 +502,8 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT
github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.0.0-20200511152416-a93e9eb0e95c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=

1 vendor/github.com/docker/distribution/.dockerignore generated vendored Normal file

@@ -0,0 +1 @@
bin/

7 vendor/github.com/docker/distribution/.golangci.yml generated vendored

@@ -18,3 +18,10 @@ run:
  deadline: 2m
  skip-dirs:
    - vendor

issues:
  exclude-rules:
    # io/ioutil is deprecated, but won't be removed until Go v2. It's safe to ignore for the release/2.8 branch.
    - text: "SA1019: \"io/ioutil\" has been deprecated since Go 1.16"
      linters:
        - staticcheck

6 vendor/github.com/docker/distribution/.mailmap generated vendored

@@ -44,6 +44,8 @@ Thomas Berger <loki@lokis-chaos.de> Thomas Berger <tbe@users.noreply.github.com>
Samuel Karp <skarp@amazon.com> Samuel Karp <samuelkarp@users.noreply.github.com>
Justin Cormack <justin.cormack@docker.com>
sayboras <sayboras@yahoo.com>
CrazyMax <github@crazymax.dev>
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
Hayley Swimelar <hswimelar@gmail.com>
Jose D. Gomez R <jose.gomez@suse.com>
Shengjing Zhu <zhsj@debian.org>
Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com>

82 vendor/github.com/docker/distribution/Dockerfile generated vendored

@@ -1,49 +1,59 @@
# syntax=docker/dockerfile:1.3
# syntax=docker/dockerfile:1

ARG GO_VERSION=1.16.15
ARG GORELEASER_XX_VERSION=1.2.5
ARG GO_VERSION=1.19.9
ARG ALPINE_VERSION=3.16
ARG XX_VERSION=1.2.1

FROM --platform=$BUILDPLATFORM crazymax/goreleaser-xx:${GORELEASER_XX_VERSION} AS goreleaser-xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS base
COPY --from=goreleaser-xx / /
RUN apk add --no-cache file git
WORKDIR /go/src/github.com/docker/distribution

FROM base AS build
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine${ALPINE_VERSION} AS base
COPY --from=xx / /
RUN apk add --no-cache bash coreutils file git
ENV GO111MODULE=auto
ENV CGO_ENABLED=0
# GIT_REF is used by goreleaser-xx to handle the proper git ref when available.
# It will fallback to the working tree info if empty and use "git tag --points-at"
# or "git describe" to define the version info.
ARG GIT_REF
ARG TARGETPLATFORM
ARG PKG="github.com/distribution/distribution"
ARG BUILDTAGS="include_oss include_gcs"
RUN --mount=type=bind,rw \
  --mount=type=cache,target=/root/.cache/go-build \
  --mount=target=/go/pkg/mod,type=cache \
  goreleaser-xx --debug \
    --name="registry" \
    --dist="/out" \
    --main="./cmd/registry" \
    --flags="-v" \
    --ldflags="-s -w -X '$PKG/version.Version={{.Version}}' -X '$PKG/version.Revision={{.Commit}}' -X '$PKG/version.Package=$PKG'" \
    --tags="$BUILDTAGS" \
    --files="LICENSE" \
    --files="README.md"
WORKDIR /go/src/github.com/docker/distribution

FROM scratch AS artifact
COPY --from=build /out/*.tar.gz /
COPY --from=build /out/*.zip /
COPY --from=build /out/*.sha256 /
FROM base AS version
ARG PKG="github.com/docker/distribution"
RUN --mount=target=. \
  VERSION=$(git describe --match 'v[0-9]*' --dirty='.m' --always --tags) REVISION=$(git rev-parse HEAD)$(if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi); \
  echo "-X ${PKG}/version.Version=${VERSION#v} -X ${PKG}/version.Revision=${REVISION} -X ${PKG}/version.Package=${PKG}" | tee /tmp/.ldflags; \
  echo -n "${VERSION}" | tee /tmp/.version;

FROM base AS build
ARG TARGETPLATFORM
ARG LDFLAGS="-s -w"
ARG BUILDTAGS="include_oss include_gcs"
RUN --mount=type=bind,target=/go/src/github.com/docker/distribution,rw \
  --mount=type=cache,target=/root/.cache/go-build \
  --mount=target=/go/pkg/mod,type=cache \
  --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=version \
  set -x ; xx-go build -trimpath -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/registry ./cmd/registry \
  && xx-verify --static /usr/bin/registry

FROM scratch AS binary
COPY --from=build /usr/local/bin/registry* /
COPY --from=build /usr/bin/registry /

FROM alpine:3.14
FROM base AS releaser
ARG TARGETOS
ARG TARGETARCH
ARG TARGETVARIANT
WORKDIR /work
RUN --mount=from=binary,target=/build \
  --mount=type=bind,target=/src \
  --mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=version \
  VERSION=$(cat /tmp/.version) \
  && mkdir -p /out \
  && cp /build/registry /src/README.md /src/LICENSE . \
  && tar -czvf "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" * \
  && sha256sum -z "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz" | awk '{ print $1 }' > "/out/registry_${VERSION#v}_${TARGETOS}_${TARGETARCH}${TARGETVARIANT}.tar.gz.sha256"

FROM scratch AS artifact
COPY --from=releaser /out /

FROM alpine:${ALPINE_VERSION}
RUN apk add --no-cache ca-certificates
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
COPY --from=build /usr/local/bin/registry /bin/registry
COPY --from=binary /registry /bin/registry
VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]

2 vendor/github.com/docker/distribution/Makefile generated vendored

@@ -50,7 +50,7 @@ version/version.go:

check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck")
	@echo "$(WHALE) $@"
	golangci-lint run
	@GO111MODULE=off golangci-lint run

test: ## run tests, except integration test with test.short
	@echo "$(WHALE) $@"

21 vendor/github.com/docker/distribution/docker-bake.hcl generated vendored

@@ -1,15 +1,3 @@
// GITHUB_REF is the actual ref that triggers the workflow
// https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables
variable "GITHUB_REF" {
  default = ""
}

target "_common" {
  args = {
    GIT_REF = GITHUB_REF
  }
}

group "default" {
  targets = ["image-local"]
}

@@ -20,13 +8,11 @@ target "docker-metadata-action" {
}

target "binary" {
  inherits = ["_common"]
  target = "binary"
  output = ["./bin"]
}

target "artifact" {
  inherits = ["_common"]
  target = "artifact"
  output = ["./bin"]
}

@@ -43,8 +29,13 @@ target "artifact-all" {
  ]
}

// Special target: https://github.com/docker/metadata-action#bake-definition
target "docker-metadata-action" {
  tags = ["registry:local"]
}

target "image" {
  inherits = ["_common", "docker-metadata-action"]
  inherits = ["docker-metadata-action"]
}

target "image-local" {

4 vendor/github.com/docker/distribution/reference/reference.go generated vendored

@@ -3,13 +3,13 @@
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/

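Note: the duplicated comment lines above are the old and new versions of the same grammar rules; the visible difference appears to be whitespace only, so parsing behaviour is unchanged. A small usage sketch against that grammar; the input string is just an example.

package main

import (
    "fmt"

    "github.com/docker/distribution/reference"
)

func main() {
    // "alpine:3.18" has no domain or path prefix, so normalization fills in
    // the defaults implied by the grammar: docker.io/library/alpine:3.18
    ref, err := reference.ParseNormalizedNamed("alpine:3.18")
    if err != nil {
        panic(err)
    }
    fmt.Println(ref.String())
}
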
17 vendor/github.com/docker/distribution/registry/api/v2/descriptors.go generated vendored

@@ -134,6 +134,19 @@ var (
        },
    }

    invalidPaginationResponseDescriptor = ResponseDescriptor{
        Name: "Invalid pagination number",
        Description: "The received parameter n was invalid in some way, as described by the error code. The client should resolve the issue and retry the request.",
        StatusCode: http.StatusBadRequest,
        Body: BodyDescriptor{
            ContentType: "application/json",
            Format: errorsBody,
        },
        ErrorCodes: []errcode.ErrorCode{
            ErrorCodePaginationNumberInvalid,
        },
    }

    repositoryNotFoundResponseDescriptor = ResponseDescriptor{
        Name: "No Such Repository Error",
        StatusCode: http.StatusNotFound,

@@ -490,6 +503,7 @@ var routeDescriptors = []RouteDescriptor{
                    },
                },
                Failures: []ResponseDescriptor{
                    invalidPaginationResponseDescriptor,
                    unauthorizedResponseDescriptor,
                    repositoryNotFoundResponseDescriptor,
                    deniedResponseDescriptor,

@@ -1578,6 +1592,9 @@ var routeDescriptors = []RouteDescriptor{
                        },
                    },
                },
                Failures: []ResponseDescriptor{
                    invalidPaginationResponseDescriptor,
                },
            },
        },
    },

9 vendor/github.com/docker/distribution/registry/api/v2/errors.go generated vendored

@@ -133,4 +133,13 @@ var (
        longer proceed.`,
        HTTPStatusCode: http.StatusNotFound,
    })

    ErrorCodePaginationNumberInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
        Value: "PAGINATION_NUMBER_INVALID",
        Message: "invalid number of results requested",
        Description: `Returned when the "n" parameter (number of results
        to return) is not an integer, "n" is negative or "n" is bigger than
        the maximum allowed.`,
        HTTPStatusCode: http.StatusBadRequest,
    })
)

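Note: a hedged sketch of how server-side code might surface the new error code. The `validatePageSize` helper and its limit are hypothetical; only `ErrorCodePaginationNumberInvalid` and the errcode `WithDetail` method come from the packages shown above.

package example

import (
    v2 "github.com/docker/distribution/registry/api/v2"
)

// validatePageSize is a hypothetical helper: it rejects a pagination value
// that is negative or above an arbitrary maximum, returning the new error
// code, which maps to HTTP 400 via its descriptor.
func validatePageSize(n, max int) error {
    if n < 0 || n > max {
        return v2.ErrorCodePaginationNumberInvalid.WithDetail(map[string]int{"n": n})
    }
    return nil
}
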
2 vendor/github.com/docker/distribution/registry/client/errors.go generated vendored

@@ -55,6 +55,8 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
        switch statusCode {
        case http.StatusUnauthorized:
            return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
        case http.StatusForbidden:
            return errcode.ErrorCodeDenied.WithMessage(detailsErr.Details)
        case http.StatusTooManyRequests:
            return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
        default:

4 vendor/github.com/docker/distribution/registry/client/repository.go generated vendored

@@ -114,9 +114,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri
        return 0, err
    }

    for cnt := range ctlg.Repositories {
        entries[cnt] = ctlg.Repositories[cnt]
    }
    copy(entries, ctlg.Repositories)
    numFilled = len(ctlg.Repositories)

    link := resp.Header.Get("Link")

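Note: the manual index loop above is replaced by the built-in copy, which behaves the same here and also stops at the shorter of the two slices instead of indexing past the destination. A standalone sketch of the idiom, with illustrative values only.

package main

import "fmt"

func main() {
    entries := make([]string, 3)
    repos := []string{"library/alpine", "library/busybox"}

    // copy fills entries from repos and returns how many elements were copied,
    // stopping at the shorter of the two slices.
    numFilled := copy(entries, repos)
    fmt.Println(numFilled, entries) // 2 [library/alpine library/busybox ]
}
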
1 vendor/github.com/docker/distribution/registry/client/transport/http_reader.go generated vendored

@@ -180,7 +180,6 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
        // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
    }

    req.Header.Add("Accept-Encoding", "identity")
    resp, err := hrs.client.Do(req)
    if err != nil {
        return nil, err

2 vendor/modules.txt vendored

@@ -362,7 +362,7 @@ github.com/deckarep/golang-set/v2
# github.com/dimchansky/utfbom v1.1.1
## explicit
github.com/dimchansky/utfbom
# github.com/docker/distribution v2.8.1+incompatible
# github.com/docker/distribution v2.8.2+incompatible
## explicit
github.com/docker/distribution
github.com/docker/distribution/digestset