Merge pull request #47364 from vvoland/buildkit-v13

vendor: github.com/moby/buildkit v0.13.0-rc2
Sebastiaan van Stijn, 1 year ago
commit 220835106b
100 changed files with 3182 additions and 1785 deletions
  1. + 11 - 0  .github/workflows/buildkit.yml
  2. + 106 - 33  builder/builder-next/adapters/containerimage/pull.go
  3. + 4 - 3  builder/builder-next/builder.go
  4. + 9 - 6  builder/builder-next/controller.go
  5. + 82 - 27  builder/builder-next/exporter/mobyexporter/export.go
  6. + 3 - 2  builder/builder-next/exporter/mobyexporter/writer.go
  7. + 2 - 2  builder/builder-next/exporter/overrides/wrapper.go
  8. + 48 - 4  builder/builder-next/worker/worker.go
  9. + 13 - 5  cmd/dockerd/daemon.go
  10. + 2 - 2  hack/with-go-mod.sh
  11. + 5 - 1  integration/build/build_session_test.go
  12. + 4 - 1  integration/build/build_traces_test.go
  13. + 11 - 5  vendor.mod
  14. + 48 - 11  vendor.sum
  15. + 7 - 7  vendor/github.com/containerd/console/.golangci.yml
  16. + 3 - 3  vendor/github.com/containerd/console/README.md
  17. + 6 - 3  vendor/github.com/containerd/console/console.go
  18. + 1 - 0  vendor/github.com/containerd/console/console_linux.go
  19. + 36 - 0  vendor/github.com/containerd/console/console_other.go
  20. + 2 - 1  vendor/github.com/containerd/console/console_unix.go
  21. + 14 - 11  vendor/github.com/containerd/console/console_windows.go
  22. + 0 - 163  vendor/github.com/containerd/console/console_zos.go
  23. + 1 - 0  vendor/github.com/containerd/console/pty_freebsd_cgo.go
  24. + 1 - 0  vendor/github.com/containerd/console/pty_freebsd_nocgo.go
  25. + 2 - 1  vendor/github.com/containerd/console/pty_unix.go
  26. + 43 - 0  vendor/github.com/containerd/console/pty_zos.go
  27. + 1 - 0  vendor/github.com/containerd/console/tc_freebsd_cgo.go
  28. + 1 - 0  vendor/github.com/containerd/console/tc_freebsd_nocgo.go
  29. + 1 - 0  vendor/github.com/containerd/console/tc_openbsd_cgo.go
  30. + 1 - 0  vendor/github.com/containerd/console/tc_openbsd_nocgo.go
  31. + 0 - 51  vendor/github.com/containerd/console/tc_solaris_cgo.go
  32. + 0 - 47  vendor/github.com/containerd/console/tc_solaris_nocgo.go
  33. + 3 - 2  vendor/github.com/containerd/console/tc_unix.go
  34. + 13 - 0  vendor/github.com/containerd/console/tc_zos.go
  35. + 1 - 2  vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
  36. + 201 - 0  vendor/github.com/containernetworking/plugins/LICENSE
  37. + 41 - 0  vendor/github.com/containernetworking/plugins/pkg/ns/README.md
  38. + 234 - 0  vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go
  39. + 0 - 34  vendor/github.com/docker/distribution/reference/helpers_deprecated.go
  40. + 0 - 92  vendor/github.com/docker/distribution/reference/normalize_deprecated.go
  41. + 0 - 172  vendor/github.com/docker/distribution/reference/reference_deprecated.go
  42. + 0 - 50  vendor/github.com/docker/distribution/reference/regexp_deprecated.go
  43. + 0 - 10  vendor/github.com/docker/distribution/reference/sort_deprecated.go
  44. + 220 - 2  vendor/github.com/moby/buildkit/AUTHORS
  45. + 427 - 195  vendor/github.com/moby/buildkit/api/services/control/control.pb.go
  46. + 15 - 7  vendor/github.com/moby/buildkit/api/services/control/control.proto
  47. + 74 - 37  vendor/github.com/moby/buildkit/cache/blobs.go
  48. + 18 - 4  vendor/github.com/moby/buildkit/cache/blobs_linux.go
  49. + 1 - 1  vendor/github.com/moby/buildkit/cache/blobs_nolinux.go
  50. + 0 - 16  vendor/github.com/moby/buildkit/cache/compression.go
  51. + 3 - 19  vendor/github.com/moby/buildkit/cache/compression_nydus.go
  52. + 2 - 2  vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go
  53. + 1 - 1  vendor/github.com/moby/buildkit/cache/filelist.go
  54. + 41 - 36  vendor/github.com/moby/buildkit/cache/manager.go
  55. + 54 - 25  vendor/github.com/moby/buildkit/cache/refs.go
  56. + 1 - 1  vendor/github.com/moby/buildkit/cache/remote.go
  57. + 6 - 0  vendor/github.com/moby/buildkit/cache/remotecache/export.go
  58. + 3 - 2  vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go
  59. + 2 - 1  vendor/github.com/moby/buildkit/cache/remotecache/import.go
  60. + 3 - 2  vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go
  61. + 3 - 2  vendor/github.com/moby/buildkit/cache/remotecache/local/local.go
  62. + 1 - 1  vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go
  63. + 73 - 24  vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go
  64. + 0 - 1  vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go
  65. + 13 - 3  vendor/github.com/moby/buildkit/cache/util/fsutil.go
  66. + 5 - 0  vendor/github.com/moby/buildkit/client/build.go
  67. + 3 - 14  vendor/github.com/moby/buildkit/client/client.go
  68. + 32 - 31  vendor/github.com/moby/buildkit/client/graph.go
  69. + 1 - 1  vendor/github.com/moby/buildkit/client/llb/async.go
  70. + 26 - 0  vendor/github.com/moby/buildkit/client/llb/exec.go
  71. + 12 - 0  vendor/github.com/moby/buildkit/client/llb/fileop.go
  72. + 8 - 8  vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go
  73. + 11 - 7  vendor/github.com/moby/buildkit/client/llb/marshal.go
  74. + 2 - 32  vendor/github.com/moby/buildkit/client/llb/resolver.go
  75. + 37 - 33  vendor/github.com/moby/buildkit/client/llb/source.go
  76. + 59 - 0  vendor/github.com/moby/buildkit/client/llb/sourceresolver/imageresolver.go
  77. + 54 - 0  vendor/github.com/moby/buildkit/client/llb/sourceresolver/types.go
  78. + 9 - 4  vendor/github.com/moby/buildkit/client/llb/state.go
  79. + 1 - 4  vendor/github.com/moby/buildkit/client/ociindex/ociindex.go
  80. + 126 - 87  vendor/github.com/moby/buildkit/client/solve.go
  81. + 23 - 0  vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go
  82. + 30 - 17  vendor/github.com/moby/buildkit/control/control.go
  83. + 14 - 3  vendor/github.com/moby/buildkit/control/gateway/gateway.go
  84. + 75 - 107  vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go
  85. + 183 - 0  vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_unix.go
  86. + 106 - 0  vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_windows.go
  87. + 0 - 67  vendor/github.com/moby/buildkit/executor/oci/mounts.go
  88. + 15 - 8  vendor/github.com/moby/buildkit/executor/oci/resolvconf.go
  89. + 18 - 20  vendor/github.com/moby/buildkit/executor/oci/spec.go
  90. + 57 - 0  vendor/github.com/moby/buildkit/executor/oci/spec_freebsd.go
  91. + 238 - 3  vendor/github.com/moby/buildkit/executor/oci/spec_linux.go
  92. + 0 - 165  vendor/github.com/moby/buildkit/executor/oci/spec_unix.go
  93. + 36 - 3  vendor/github.com/moby/buildkit/executor/oci/spec_windows.go
  94. + 1 - 1  vendor/github.com/moby/buildkit/executor/oci/user.go
  95. + 3 - 3  vendor/github.com/moby/buildkit/executor/resources/monitor.go
  96. + 23 - 22  vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go
  97. + 3 - 3  vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go
  98. + 38 - 5  vendor/github.com/moby/buildkit/exporter/containerimage/export.go
  99. + 4 - 0  vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go
  100. + 7 - 4  vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go

+ 11 - 0
.github/workflows/buildkit.yml

@@ -50,6 +50,9 @@ jobs:
     timeout-minutes: 120
     needs:
       - build
+    env:
+      TEST_IMAGE_BUILD: "0"
+      TEST_IMAGE_ID: "buildkit-tests"
     strategy:
       fail-fast: false
       matrix:
@@ -115,6 +118,14 @@ jobs:
           sudo service docker restart
           docker version
           docker info
+      -
+        name: Build test image
+        uses: docker/bake-action@v4
+        with:
+          workdir: ./buildkit
+          targets: integration-tests
+          set: |
+            *.output=type=docker,name=${{ env.TEST_IMAGE_ID }}
       -
         name: Test
         run: |

+ 106 - 33
builder/builder-next/adapters/containerimage/pull.go

@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"io"
 	"path"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -34,14 +35,15 @@ import (
 	pkgprogress "github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/reference"
 	"github.com/moby/buildkit/cache"
-	"github.com/moby/buildkit/client/llb"
+	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/client/llb/sourceresolver"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/source"
+	"github.com/moby/buildkit/source/containerimage"
 	srctypes "github.com/moby/buildkit/source/types"
 	"github.com/moby/buildkit/sourcepolicy"
-	policy "github.com/moby/buildkit/sourcepolicy/pb"
 	spb "github.com/moby/buildkit/sourcepolicy/pb"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/moby/buildkit/util/imageutil"
@@ -80,9 +82,77 @@ func NewSource(opt SourceOpt) (*Source, error) {
 	return &Source{SourceOpt: opt}, nil
 }
 
-// ID returns image scheme identifier
-func (is *Source) ID() string {
-	return srctypes.DockerImageScheme
+// Schemes returns a list of SourceOp identifier schemes that this source
+// should match.
+func (is *Source) Schemes() []string {
+	return []string{srctypes.DockerImageScheme}
+}
+
+// Identifier constructs an Identifier from the given scheme, ref, and attrs,
+// all of which come from a SourceOp.
+func (is *Source) Identifier(scheme, ref string, attrs map[string]string, platform *pb.Platform) (source.Identifier, error) {
+	return is.registryIdentifier(ref, attrs, platform)
+}
+
+// Copied from github.com/moby/buildkit/source/containerimage/source.go
+func (is *Source) registryIdentifier(ref string, attrs map[string]string, platform *pb.Platform) (source.Identifier, error) {
+	id, err := containerimage.NewImageIdentifier(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	if platform != nil {
+		id.Platform = &ocispec.Platform{
+			OS:           platform.OS,
+			Architecture: platform.Architecture,
+			Variant:      platform.Variant,
+			OSVersion:    platform.OSVersion,
+		}
+		if platform.OSFeatures != nil {
+			id.Platform.OSFeatures = append([]string{}, platform.OSFeatures...)
+		}
+	}
+
+	for k, v := range attrs {
+		switch k {
+		case pb.AttrImageResolveMode:
+			rm, err := resolver.ParseImageResolveMode(v)
+			if err != nil {
+				return nil, err
+			}
+			id.ResolveMode = rm
+		case pb.AttrImageRecordType:
+			rt, err := parseImageRecordType(v)
+			if err != nil {
+				return nil, err
+			}
+			id.RecordType = rt
+		case pb.AttrImageLayerLimit:
+			l, err := strconv.Atoi(v)
+			if err != nil {
+				return nil, errors.Wrapf(err, "invalid layer limit %s", v)
+			}
+			if l <= 0 {
+				return nil, errors.Errorf("invalid layer limit %s", v)
+			}
+			id.LayerLimit = &l
+		}
+	}
+
+	return id, nil
+}
+
+func parseImageRecordType(v string) (client.UsageRecordType, error) {
+	switch client.UsageRecordType(v) {
+	case "", client.UsageRecordTypeRegular:
+		return client.UsageRecordTypeRegular, nil
+	case client.UsageRecordTypeInternal:
+		return client.UsageRecordTypeInternal, nil
+	case client.UsageRecordTypeFrontend:
+		return client.UsageRecordTypeFrontend, nil
+	default:
+		return "", errors.Errorf("invalid record type %s", v)
+	}
 }
 
 func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
@@ -107,7 +177,7 @@ type resolveRemoteResult struct {
 	dt   []byte
 }
 
-func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
+func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocispec.Platform, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
 	p := platforms.DefaultSpec()
 	if platform != nil {
 		p = *platform
@@ -116,34 +186,36 @@ func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocisp
 	key := "getconfig::" + ref + "::" + platforms.Format(p)
 	res, err := is.g.Do(ctx, key, func(ctx context.Context) (*resolveRemoteResult, error) {
 		res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g)
-		ref, dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform, []*policy.Policy{})
+		dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, platform)
 		if err != nil {
 			return nil, err
 		}
 		return &resolveRemoteResult{ref: ref, dgst: dgst, dt: dt}, nil
 	})
 	if err != nil {
-		return ref, "", nil, err
+		return "", nil, err
 	}
-	return res.ref, res.dgst, res.dt, nil
+	return res.dgst, res.dt, nil
 }
 
 // ResolveImageConfig returns image config for an image
-func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
+func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
+	if opt.ImageOpt == nil {
+		return "", nil, fmt.Errorf("can only resolve an image: %v, opt: %v", ref, opt)
+	}
 	ref, err := applySourcePolicies(ctx, ref, opt.SourcePolicies)
 	if err != nil {
-		return "", "", nil, err
+		return "", nil, err
 	}
-	resolveMode, err := source.ParseImageResolveMode(opt.ResolveMode)
+	resolveMode, err := resolver.ParseImageResolveMode(opt.ImageOpt.ResolveMode)
 	if err != nil {
-		return ref, "", nil, err
+		return "", nil, err
 	}
 	switch resolveMode {
-	case source.ResolveModeForcePull:
-		ref, dgst, dt, err := is.resolveRemote(ctx, ref, opt.Platform, sm, g)
+	case resolver.ResolveModeForcePull:
+		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
 		// TODO: pull should fallback to local in case of failure to allow offline behavior
 		// the fallback doesn't work currently
-		return ref, dgst, dt, err
 		/*
 			if err == nil {
 				return dgst, dt, err
@@ -153,10 +225,10 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re
 			return "", dt, err
 		*/
 
-	case source.ResolveModeDefault:
+	case resolver.ResolveModeDefault:
 		// default == prefer local, but in the future could be smarter
 		fallthrough
-	case source.ResolveModePreferLocal:
+	case resolver.ResolveModePreferLocal:
 		img, err := is.resolveLocal(ref)
 		if err == nil {
 			if opt.Platform != nil && !platformMatches(img, opt.Platform) {
@@ -165,19 +237,19 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re
 					path.Join(img.OS, img.Architecture, img.Variant),
 				)
 			} else {
-				return ref, "", img.RawJSON(), err
+				return "", img.RawJSON(), err
 			}
 		}
 		// fallback to remote
 		return is.resolveRemote(ctx, ref, opt.Platform, sm, g)
 	}
 	// should never happen
-	return ref, "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ResolveMode)
+	return "", nil, fmt.Errorf("builder cannot resolve image %s: invalid mode %q", ref, opt.ImageOpt.ResolveMode)
 }
 
 // Resolve returns access to pulling for an identifier
 func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) {
-	imageIdentifier, ok := id.(*source.ImageIdentifier)
+	imageIdentifier, ok := id.(*containerimage.ImageIdentifier)
 	if !ok {
 		return nil, errors.Errorf("invalid image identifier %v", id)
 	}
@@ -201,7 +273,7 @@ type puller struct {
 	is               *Source
 	resolveLocalOnce sync.Once
 	g                flightcontrol.Group[struct{}]
-	src              *source.ImageIdentifier
+	src              *containerimage.ImageIdentifier
 	desc             ocispec.Descriptor
 	ref              string
 	config           []byte
@@ -253,7 +325,7 @@ func (p *puller) resolveLocal() {
 			}
 		}
 
-		if p.src.ResolveMode == source.ResolveModeDefault || p.src.ResolveMode == source.ResolveModePreferLocal {
+		if p.src.ResolveMode == resolver.ResolveModeDefault || p.src.ResolveMode == resolver.ResolveModePreferLocal {
 			ref := p.src.Reference.String()
 			img, err := p.is.resolveLocal(ref)
 			if err == nil {
@@ -302,12 +374,17 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error {
 			if err != nil {
 				return struct{}{}, err
 			}
-			newRef, _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: p.src.ResolveMode.String()}, p.sm, g)
+			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), sourceresolver.Opt{
+				Platform: &p.platform,
+				ImageOpt: &sourceresolver.ResolveImageOpt{
+					ResolveMode: p.src.ResolveMode.String(),
+				},
+			}, p.sm, g)
 			if err != nil {
 				return struct{}{}, err
 			}
 
-			p.ref = newRef
+			p.ref = ref.String()
 			p.config = dt
 		}
 		return struct{}{}, nil
@@ -866,12 +943,8 @@ func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (s
 	if err != nil {
 		return "", errors.WithStack(err)
 	}
-	op := &pb.Op{
-		Op: &pb.Op_Source{
-			Source: &pb.SourceOp{
-				Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
-			},
-		},
+	op := &pb.SourceOp{
+		Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
 	}
 
 	mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op)
@@ -884,9 +957,9 @@ func applySourcePolicies(ctx context.Context, str string, spls []*spb.Policy) (s
 			t  string
 			ok bool
 		)
-		t, newRef, ok := strings.Cut(op.GetSource().GetIdentifier(), "://")
+		t, newRef, ok := strings.Cut(op.GetIdentifier(), "://")
 		if !ok {
-			return "", errors.Errorf("could not parse ref: %s", op.GetSource().GetIdentifier())
+			return "", errors.Errorf("could not parse ref: %s", op.GetIdentifier())
 		}
 		if ok && t != srctypes.DockerImageScheme {
 			return "", &imageutil.ResolveToNonImageError{Ref: str, Updated: newRef}

+ 4 - 3
builder/builder-next/builder.go

@@ -389,9 +389,10 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder.
 	}
 
 	req := &controlapi.SolveRequest{
-		Ref:           id,
-		Exporter:      exporterName,
-		ExporterAttrs: exporterAttrs,
+		Ref: id,
+		Exporters: []*controlapi.Exporter{
+			&controlapi.Exporter{Type: exporterName, Attrs: exporterAttrs},
+		},
 		Frontend:      "dockerfile.v0",
 		FrontendAttrs: frontendAttrs,
 		Session:       opt.Options.SessionID,

+ 9 - 6
builder/builder-next/controller.go

@@ -67,11 +67,11 @@ func newController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control
 }
 
 func getTraceExporter(ctx context.Context) trace.SpanExporter {
-	exp, err := detect.Exporter()
+	span, _, err := detect.Exporter()
 	if err != nil {
 		log.G(ctx).WithError(err).Error("Failed to detect trace exporter for buildkit controller")
 	}
-	return exp
+	return span
 }
 
 func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt) (*control.Controller, error) {
@@ -105,7 +105,8 @@ func newSnapshotterController(ctx context.Context, rt http.RoundTripper, opt Opt
 	wo, err := containerd.NewWorkerOpt(opt.Root, opt.ContainerdAddress, opt.Snapshotter, opt.ContainerdNamespace,
 		opt.Rootless, map[string]string{
 			label.Snapshotter: opt.Snapshotter,
-		}, dns, nc, opt.ApparmorProfile, false, nil, "", ctd.WithTimeout(60*time.Second))
+		}, dns, nc, opt.ApparmorProfile, false, nil, "", nil, ctd.WithTimeout(60*time.Second),
+	)
 	if err != nil {
 		return nil, err
 	}
@@ -302,9 +303,11 @@ func newGraphDriverController(ctx context.Context, rt http.RoundTripper, opt Opt
 	}
 
 	exp, err := mobyexporter.New(mobyexporter.Opt{
-		ImageStore:  dist.ImageStore,
-		Differ:      differ,
-		ImageTagger: opt.ImageTagger,
+		ImageStore:   dist.ImageStore,
+		ContentStore: store,
+		Differ:       differ,
+		ImageTagger:  opt.ImageTagger,
+		LeaseManager: lm,
 	})
 	if err != nil {
 		return nil, err

+ 82 - 27
builder/builder-next/exporter/mobyexporter/export.go

@@ -1,24 +1,25 @@
 package mobyexporter
 
 import (
+	"bytes"
 	"context"
-	"encoding/json"
 	"fmt"
 	"strings"
+	"time"
 
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/leases"
 	distref "github.com/distribution/reference"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/layer"
 	"github.com/moby/buildkit/exporter"
+	"github.com/moby/buildkit/exporter/containerimage"
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
 
-const (
-	keyImageName = "name"
-)
-
 // Differ can make a moby layer from a snapshot
 type Differ interface {
 	EnsureLayer(ctx context.Context, key string) ([]layer.DiffID, error)
@@ -30,9 +31,11 @@ type ImageTagger interface {
 
 // Opt defines a struct for creating new exporter
 type Opt struct {
-	ImageStore  image.Store
-	Differ      Differ
-	ImageTagger ImageTagger
+	ImageStore   image.Store
+	Differ       Differ
+	ImageTagger  ImageTagger
+	ContentStore content.Store
+	LeaseManager leases.Manager
 }
 
 type imageExporter struct {
@@ -45,13 +48,14 @@ func New(opt Opt) (exporter.Exporter, error) {
 	return im, nil
 }
 
-func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
+func (e *imageExporter) Resolve(ctx context.Context, id int, opt map[string]string) (exporter.ExporterInstance, error) {
 	i := &imageExporterInstance{
 		imageExporter: e,
+		id:            id,
 	}
 	for k, v := range opt {
-		switch k {
-		case keyImageName:
+		switch exptypes.ImageExporterOptKey(k) {
+		case exptypes.OptKeyName:
 			for _, v := range strings.Split(v, ",") {
 				ref, err := distref.ParseNormalizedNamed(v)
 				if err != nil {
@@ -71,10 +75,15 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 
 type imageExporterInstance struct {
 	*imageExporter
+	id          int
 	targetNames []distref.Named
 	meta        map[string][]byte
 }
 
+func (e *imageExporterInstance) ID() int {
+	return e.id
+}
+
 func (e *imageExporterInstance) Name() string {
 	return "exporting to image"
 }
@@ -83,7 +92,7 @@ func (e *imageExporterInstance) Config() *exporter.Config {
 	return exporter.NewConfig()
 }
 
-func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
+func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
 	if len(inp.Refs) > 1 {
 		return nil, nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
 	}
@@ -103,18 +112,14 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
 	case 0:
 		config = inp.Metadata[exptypes.ExporterImageConfigKey]
 	case 1:
-		platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey]
-		if !ok {
-			return nil, nil, fmt.Errorf("cannot export image, missing platforms mapping")
-		}
-		var p exptypes.Platforms
-		if err := json.Unmarshal(platformsBytes, &p); err != nil {
-			return nil, nil, errors.Wrapf(err, "failed to parse platforms passed to exporter")
+		ps, err := exptypes.ParsePlatforms(inp.Metadata)
+		if err != nil {
+			return nil, nil, fmt.Errorf("cannot export image, failed to parse platforms: %w", err)
 		}
-		if len(p.Platforms) != len(inp.Refs) {
-			return nil, nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs))
+		if len(ps.Platforms) != len(inp.Refs) {
+			return nil, nil, errors.Errorf("number of platforms does not match references %d %d", len(ps.Platforms), len(inp.Refs))
 		}
-		config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.Platforms[0].ID)]
+		config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, ps.Platforms[0].ID)]
 	}
 
 	var diffs []digest.Digest
@@ -157,7 +162,21 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
 
 	diffs, history = normalizeLayersAndHistory(diffs, history, ref)
 
-	config, err = patchImageConfig(config, diffs, history, inp.Metadata[exptypes.ExporterInlineCache])
+	var inlineCacheEntry *exptypes.InlineCacheEntry
+	if inlineCache != nil {
+		inlineCacheResult, err := inlineCache(ctx)
+		if err != nil {
+			return nil, nil, err
+		}
+		if inlineCacheResult != nil {
+			if ref != nil {
+				inlineCacheEntry, _ = inlineCacheResult.FindRef(ref.ID())
+			} else {
+				inlineCacheEntry = inlineCacheResult.Ref
+			}
+		}
+	}
+	config, err = patchImageConfig(config, diffs, history, inlineCacheEntry)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -171,8 +190,10 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
 	}
 	_ = configDone(nil)
 
-	if e.opt.ImageTagger != nil {
-		for _, targetName := range e.targetNames {
+	var names []string
+	for _, targetName := range e.targetNames {
+		names = append(names, targetName.String())
+		if e.opt.ImageTagger != nil {
 			tagDone := oneOffProgress(ctx, "naming to "+targetName.String())
 			if err := e.opt.ImageTagger.TagImage(ctx, image.ID(digest.Digest(id)), targetName); err != nil {
 				return nil, nil, tagDone(err)
@@ -181,8 +202,42 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source
 		}
 	}
 
-	return map[string]string{
+	resp := map[string]string{
 		exptypes.ExporterImageConfigDigestKey: configDigest.String(),
 		exptypes.ExporterImageDigestKey:       id.String(),
-	}, nil, nil
+	}
+	if len(names) > 0 {
+		resp["image.name"] = strings.Join(names, ",")
+	}
+
+	descRef, err := e.newTempReference(ctx, config)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to create a temporary descriptor reference: %w", err)
+	}
+
+	return resp, descRef, nil
+}
+
+func (e *imageExporterInstance) newTempReference(ctx context.Context, config []byte) (exporter.DescriptorReference, error) {
+	lm := e.opt.LeaseManager
+
+	dgst := digest.FromBytes(config)
+	lease, err := lm.Create(ctx, leases.WithRandomID(), leases.WithExpiration(time.Hour))
+	if err != nil {
+		return nil, err
+	}
+
+	desc := ocispec.Descriptor{
+		Digest:    dgst,
+		MediaType: "application/vnd.docker.container.image.v1+json",
+		Size:      int64(len(config)),
+	}
+
+	if err := content.WriteBlob(ctx, e.opt.ContentStore, desc.Digest.String(), bytes.NewReader(config), desc); err != nil {
+		return nil, fmt.Errorf("failed to save temporary image config: %w", err)
+	}
+
+	return containerimage.NewDescriptorReference(desc, func(ctx context.Context) error {
+		return lm.Delete(ctx, lease)
+	}), nil
 }

+ 3 - 2
builder/builder-next/exporter/mobyexporter/writer.go

@@ -8,6 +8,7 @@ import (
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/log"
 	"github.com/moby/buildkit/cache"
+	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/util/system"
 	"github.com/opencontainers/go-digest"
@@ -38,7 +39,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispec.History, error) {
 	return config.History, nil
 }
 
-func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History, cache []byte) ([]byte, error) {
+func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History, cache *exptypes.InlineCacheEntry) ([]byte, error) {
 	m := map[string]json.RawMessage{}
 	if err := json.Unmarshal(dt, &m); err != nil {
 		return nil, errors.Wrap(err, "failed to parse image config for patch")
@@ -75,7 +76,7 @@ func patchImageConfig(dt []byte, dps []digest.Digest, history []ocispec.History,
 	}
 
 	if cache != nil {
-		dt, err := json.Marshal(cache)
+		dt, err := json.Marshal(cache.Data)
 		if err != nil {
 			return nil, err
 		}

+ 2 - 2
builder/builder-next/exporter/overrides/wrapper.go

@@ -19,7 +19,7 @@ func NewExporterWrapper(exp exporter.Exporter) (exporter.Exporter, error) {
 }
 
 // Resolve applies moby specific attributes to the request.
-func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
+func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, id int, exporterAttrs map[string]string) (exporter.ExporterInstance, error) {
 	if exporterAttrs == nil {
 		exporterAttrs = make(map[string]string)
 	}
@@ -33,5 +33,5 @@ func (e *imageExporterMobyWrapper) Resolve(ctx context.Context, exporterAttrs ma
 		exporterAttrs[string(exptypes.OptKeyDanglingPrefix)] = "moby-dangling"
 	}
 
-	return e.exp.Resolve(ctx, exporterAttrs)
+	return e.exp.Resolve(ctx, id, exporterAttrs)
 }

+ 48 - 4
builder/builder-next/worker/worker.go

@@ -12,7 +12,7 @@ import (
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/containerd/log"
-	"github.com/docker/docker/builder/builder-next/adapters/containerimage"
+	imageadapter "github.com/docker/docker/builder/builder-next/adapters/containerimage"
 	mobyexporter "github.com/docker/docker/builder/builder-next/exporter"
 	distmetadata "github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
@@ -23,7 +23,7 @@ import (
 	"github.com/moby/buildkit/cache"
 	cacheconfig "github.com/moby/buildkit/cache/config"
 	"github.com/moby/buildkit/client"
-	"github.com/moby/buildkit/client/llb"
+	"github.com/moby/buildkit/client/llb/sourceresolver"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/exporter"
 	localexporter "github.com/moby/buildkit/exporter/local"
@@ -37,6 +37,7 @@ import (
 	"github.com/moby/buildkit/solver/llbsolver/ops"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/source"
+	"github.com/moby/buildkit/source/containerimage"
 	"github.com/moby/buildkit/source/git"
 	"github.com/moby/buildkit/source/http"
 	"github.com/moby/buildkit/source/local"
@@ -75,7 +76,7 @@ type Opt struct {
 	ContentStore      *containerdsnapshot.Store
 	CacheManager      cache.Manager
 	LeaseManager      *leaseutil.Manager
-	ImageSource       *containerimage.Source
+	ImageSource       *imageadapter.Source
 	DownloadManager   *xfer.LayerDownloadManager
 	V2MetadataService distmetadata.V2MetadataService
 	Transport         nethttp.RoundTripper
@@ -212,6 +213,49 @@ func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.Imm
 	return w.CacheManager().Get(ctx, id, nil, opts...)
 }
 
+func (w *Worker) ResolveSourceMetadata(ctx context.Context, op *pb.SourceOp, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (*sourceresolver.MetaResponse, error) {
+	if opt.SourcePolicies != nil {
+		return nil, errors.New("source policies can not be set for worker")
+	}
+
+	var platform *pb.Platform
+	if p := opt.Platform; p != nil {
+		platform = &pb.Platform{
+			Architecture: p.Architecture,
+			OS:           p.OS,
+			Variant:      p.Variant,
+			OSVersion:    p.OSVersion,
+		}
+	}
+
+	id, err := w.SourceManager.Identifier(&pb.Op_Source{Source: op}, platform)
+	if err != nil {
+		return nil, err
+	}
+
+	switch idt := id.(type) {
+	case *containerimage.ImageIdentifier:
+		if opt.ImageOpt == nil {
+			opt.ImageOpt = &sourceresolver.ResolveImageOpt{}
+		}
+		dgst, config, err := w.ImageSource.ResolveImageConfig(ctx, idt.Reference.String(), opt, sm, g)
+		if err != nil {
+			return nil, err
+		}
+		return &sourceresolver.MetaResponse{
+			Op: op,
+			Image: &sourceresolver.ResolveImageResponse{
+				Digest: dgst,
+				Config: config,
+			},
+		}, nil
+	}
+
+	return &sourceresolver.MetaResponse{
+		Op: op,
+	}, nil
+}
+
 // ResolveOp converts a LLB vertex into a LLB operation
 func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) {
 	if baseOp, ok := v.Sys().(*pb.Op); ok {
@@ -236,7 +280,7 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
 }
 
 // ResolveImageConfig returns image config for an image
-func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) {
+func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) {
 	return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g)
 }
 

+ 13 - 5
cmd/dockerd/daemon.go

@@ -242,7 +242,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 
 	// Override BuildKit's default Resource so that it matches the semconv
 	// version that is used in our code.
-	detect.Resource = resource.Default()
+	detect.OverrideResource(resource.Default())
 	detect.Recorder = detect.NewTraceRecorder()
 
 	tp, err := detect.TracerProvider()
@@ -380,12 +380,20 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
 // TODO: This can be removed after buildkit is updated to use http/protobuf as the default.
 func setOTLPProtoDefault() {
 	const (
-		tracesEnv = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"
-		protoEnv  = "OTEL_EXPORTER_OTLP_PROTOCOL"
+		tracesEnv  = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"
+		metricsEnv = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL"
+		protoEnv   = "OTEL_EXPORTER_OTLP_PROTOCOL"
+
+		defaultProto = "http/protobuf"
 	)
 
-	if os.Getenv(tracesEnv) == "" && os.Getenv(protoEnv) == "" {
-		os.Setenv(tracesEnv, "http/protobuf")
+	if os.Getenv(protoEnv) == "" {
+		if os.Getenv(tracesEnv) == "" {
+			os.Setenv(tracesEnv, defaultProto)
+		}
+		if os.Getenv(metricsEnv) == "" {
+			os.Setenv(metricsEnv, defaultProto)
+		}
 	}
 }
 

+ 2 - 2
hack/with-go-mod.sh

@@ -25,9 +25,9 @@ else
 	tee "${ROOTDIR}/go.mod" >&2 <<- EOF
 		module github.com/docker/docker
 
-		go 1.20
+		go 1.21
 	EOF
 	trap 'rm -f "${ROOTDIR}/go.mod"' EXIT
 fi
 
-GO111MODULE=on "$@"
+GO111MODULE=on GOTOOLCHAIN=local "$@"

+ 5 - 1
integration/build/build_session_test.go

@@ -15,6 +15,7 @@ import (
 	"github.com/docker/docker/testutil/request"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session/filesync"
+	"github.com/tonistiigi/fsutil"
 	"golang.org/x/sync/errgroup"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
@@ -95,8 +96,11 @@ func testBuildWithSession(ctx context.Context, t *testing.T, client dclient.APIC
 	sess, err := session.NewSession(ctx, "foo1", "foo")
 	assert.Check(t, err)
 
+	fs, err := fsutil.NewFS(dir)
+	assert.NilError(t, err)
+
 	fsProvider := filesync.NewFSSyncProvider(filesync.StaticDirSource{
-		"": {Dir: dir},
+		"": fs,
 	})
 	sess.Allow(fsProvider)
 

+ 4 - 1
integration/build/build_traces_test.go

@@ -56,8 +56,11 @@ func TestBuildkitHistoryTracePropagation(t *testing.T) {
 		<-sub.Context().Done()
 	}()
 
+	d, err := progressui.NewDisplay(&testWriter{t}, progressui.AutoMode, progressui.WithPhase("test"))
+	assert.NilError(t, err)
+
 	eg.Go(func() error {
-		_, err := progressui.DisplaySolveStatus(ctxGo, nil, &testWriter{t}, ch, progressui.WithPhase("test"))
+		_, err := d.UpdateFrom(ctxGo, ch)
 		return err
 	})
 

+ 11 - 5
vendor.mod

@@ -4,7 +4,7 @@
 
 module github.com/docker/docker
 
-go 1.20
+go 1.21
 
 require (
 	cloud.google.com/go/compute/metadata v0.2.3
@@ -60,7 +60,7 @@ require (
 	github.com/miekg/dns v1.1.43
 	github.com/mistifyio/go-zfs/v3 v3.0.1
 	github.com/mitchellh/copystructure v1.2.0
-	github.com/moby/buildkit v0.12.5
+	github.com/moby/buildkit v0.13.0-rc2
 	github.com/moby/docker-image-spec v1.3.1
 	github.com/moby/ipvs v1.1.0
 	github.com/moby/locker v1.0.1
@@ -87,6 +87,7 @@ require (
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.0
 	github.com/spf13/pflag v1.0.5
+	github.com/tonistiigi/fsutil v0.0.0-20240223190444-7a889f53dbf6
 	github.com/tonistiigi/go-archvariant v1.0.0
 	github.com/vbatts/tar-split v0.11.5
 	github.com/vishvananda/netlink v1.2.1-beta.2
@@ -136,13 +137,14 @@ require (
 	github.com/cilium/ebpf v0.11.0 // indirect
 	github.com/container-storage-interface/spec v1.5.0 // indirect
 	github.com/containerd/cgroups v1.1.0 // indirect
-	github.com/containerd/console v1.0.3 // indirect
+	github.com/containerd/console v1.0.4 // indirect
 	github.com/containerd/go-cni v1.1.9 // indirect
 	github.com/containerd/go-runc v1.1.0 // indirect
 	github.com/containerd/nydus-snapshotter v0.13.7 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
 	github.com/containerd/ttrpc v1.2.2 // indirect
 	github.com/containernetworking/cni v1.1.2 // indirect
+	github.com/containernetworking/plugins v1.4.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
@@ -185,7 +187,6 @@ require (
 	github.com/spdx/tools-golang v0.5.1 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tinylib/msgp v1.1.8 // indirect
-	github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb // indirect
 	github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 // indirect
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
 	github.com/tonistiigi/vt100 v0.0.0-20230623042737-f9a4f7ef6531 // indirect
@@ -198,9 +199,14 @@ require (
 	go.etcd.io/etcd/server/v3 v3.5.6 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.46.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
+	go.opentelemetry.io/otel/exporters/prometheus v0.42.0 // indirect
 	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
 	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect

+ 48 - 11
vendor.sum

@@ -34,6 +34,7 @@ cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
 cloud.google.com/go/iam v1.1.3 h1:18tKG7DzydKWUnLjonWcJO6wjSCAtzh4GcRKlH/Hrzc=
+cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE=
 cloud.google.com/go/logging v1.8.1 h1:26skQWPeYhvIasWKm48+Eq7oUqdcdbwsCVwz5Ys0FvU=
 cloud.google.com/go/logging v1.8.1/go.mod h1:TJjR+SimHwuC8MZ9cjByQulAMgni+RkXeI3wwctHJEI=
 cloud.google.com/go/longrunning v0.5.2 h1:u+oFqfEwwU7F9dIELigxbe0XVnBAo9wqMuQLA50CZ5k=
@@ -119,6 +120,7 @@ github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF0
 github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
+github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
 github.com/Microsoft/go-winio v0.4.15-0.20200908182639-5b44b70ab3ab/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
@@ -147,7 +149,9 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU
 github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
 github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0=
+github.com/akutz/gosync v0.1.0/go.mod h1:I8I4aiqJI1nqaeYOOB1WS+CgRJVVPqhct9Y4njywM84=
 github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A=
+github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw=
 github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -276,6 +280,7 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
 github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
@@ -285,6 +290,7 @@ github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOi
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
+github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
 github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo=
 github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
 github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
@@ -298,8 +304,8 @@ github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
 github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
 github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
-github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
-github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
+github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
 github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -328,8 +334,8 @@ github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3
 github.com/containerd/nydus-snapshotter v0.13.7 h1:x7DHvGnzJOu1ZPwPYkeOPk5MjZZYbdddygEjaSDoFTk=
 github.com/containerd/nydus-snapshotter v0.13.7/go.mod h1:VPVKQ3jmHFIcUIV2yiQ1kImZuBFS3GXDohKs9mRABVE=
 github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4=
-github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
-github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
+github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs=
@@ -341,6 +347,8 @@ github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3H
 github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
 github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
+github.com/containernetworking/plugins v1.4.0 h1:+w22VPYgk7nQHw7KT92lsRmuToHvb7wwSv9iTbXzzic=
+github.com/containernetworking/plugins v1.4.0/go.mod h1:UYhcOyjefnrQvKvmmyEKsUA+M9Nfn7tqULPpH0Pkcj0=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
@@ -443,6 +451,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
+github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -457,6 +466,7 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI
 github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
 github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
+github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
@@ -506,6 +516,7 @@ github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LB
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
 github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
 github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
 github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
@@ -549,6 +560,7 @@ github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2 h1:xisWqjiKEff2B0KfFYG
 github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
+github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -644,8 +656,9 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBxH5Pj7KeFK5l+Y3FsgT8keqKtk=
+github.com/google/pprof v0.0.0-20230323073829-e72429f035bd/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
 github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc=
@@ -826,6 +839,7 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
@@ -899,8 +913,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx
 github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
 github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
-github.com/moby/buildkit v0.12.5 h1:RNHH1l3HDhYyZafr5EgstEu8aGNCwyfvMtrQDtjH9T0=
-github.com/moby/buildkit v0.12.5/go.mod h1:YGwjA2loqyiYfZeEo8FtI7z4x5XponAaIWsWcSjWwso=
+github.com/moby/buildkit v0.13.0-rc2 h1:LWAIkaBIoRTne57NJCnFMdFV30auPia3j9UUZeUc24A=
+github.com/moby/buildkit v0.13.0-rc2/go.mod h1:RWPZ1bRcehlF1bjPzj7+wOPZ5cLViAEtx5ZNQWma5/s=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ=
@@ -971,8 +985,10 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
+github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs=
+github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -982,7 +998,8 @@ github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
 github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1031,6 +1048,7 @@ github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
 github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee h1:P6U24L02WMfj9ymZTxl7CxS73JC99x3ukk+DBkgQGQs=
+github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee/go.mod h1:3uODdxMgOaPYeWU7RzZLxVtJHZ/x1f/iHkBZuKJDzuY=
 github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
 github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
@@ -1097,12 +1115,14 @@ github.com/quasilyte/go-ruleguard v0.1.2-0.20200318202121-b00d7a75d3d8/go.mod h1
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
 github.com/rexray/gocsi v1.2.2 h1:h9F/eSizORihN+XT+mxhq7ClZ3cYo1L9RvasN6dKz8U=
+github.com/rexray/gocsi v1.2.2/go.mod h1:X9oJHHpIVGmfKdK8e+JuCXafggk7HxL9mWQOgrsoHpo=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/rootless-containers/rootlesskit/v2 v2.0.1 h1:yMUDTn9dMWtTkccosPDJpMVxjhmEjSD6jYyaePCXshg=
 github.com/rootless-containers/rootlesskit/v2 v2.0.1/go.mod h1:ZwETpgA/DPizAF7Zdui4ZHOfYK5rZ4Z4SUO6omyZVfY=
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
@@ -1200,6 +1220,7 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1224,8 +1245,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1
 github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
 github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
 github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85/go.mod h1:a7cilN64dG941IOXfhJhlH0qB92hxJ9A1ewrdUmJ6xo=
-github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb h1:uUe8rNyVXM8moActoBol6Xf6xX2GMr7SosR2EywMvGg=
-github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb/go.mod h1:SxX/oNQ/ag6Vaoli547ipFK9J7BZn5JqJG0JE8lf8bA=
+github.com/tonistiigi/fsutil v0.0.0-20240223190444-7a889f53dbf6 h1:v9u6pmdUkarXL/1S/6LGcG9wsiBLd9N/WyJq/Y9WPcg=
+github.com/tonistiigi/fsutil v0.0.0-20240223190444-7a889f53dbf6/go.mod h1:vbbYqJlnswsbJqWUcJN8fKtBhnEgldDrcagTgnBVKKM=
 github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 h1:8eY6m1mjgyB8XySUR7WvebTM8D/Vs86jLJzD/Tw7zkc=
 github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7/go.mod h1:qqvyZqkfwkoJuPU/bw61bItaoO0SJ8YSW0vSVRRvsRg=
 github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0=
@@ -1272,6 +1293,7 @@ github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfD
 github.com/xanzy/go-gitlab v0.32.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
+github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
@@ -1334,6 +1356,12 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:
 go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
 go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
 go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 h1:ZtfnDL+tUrs1F0Pzfwbg2d59Gru9NCH3bgSHBM6LDwU=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0/go.mod h1:hG4Fj/y8TR/tlEDREo8tWstl9fO9gcFkn4xrx0Io8xU=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 h1:NmnYCiR0qNufkldjVvyQfZTHSdzeHoZ41zggMsdMcLM=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0/go.mod h1:UVAO61+umUsHLtYb8KXXRoHtxUkdOPkYidzW3gipRLQ=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 h1:wNMDy/LVGLj2h3p6zg4d0gypKfWKSWI14E1C4smOgl8=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0/go.mod h1:YfbDdXAAkemWJK3H/DshvlrxqFB2rtW4rY6ky/3x/H0=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
@@ -1342,11 +1370,15 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqhe
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
+go.opentelemetry.io/otel/exporters/prometheus v0.42.0 h1:jwV9iQdvp38fxXi8ZC+lNpxjK16MRcZlpDYvbuO1FiA=
+go.opentelemetry.io/otel/exporters/prometheus v0.42.0/go.mod h1:f3bYiqNqhoPxkvI2LrXqQVC546K7BuRDL/kKuxkujhA=
 go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
 go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
 go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
 go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
 go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
+go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0=
+go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q=
 go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
 go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
 go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
@@ -1361,6 +1393,7 @@ go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
@@ -1621,6 +1654,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
 golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1854,6 +1888,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -1931,7 +1966,9 @@ k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuB
 k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 kernel.org/pub/linux/libs/security/libcap/cap v1.2.67 h1:sPQ9qlSNR26fToTKbxe/HDWJlXvBLqGmt84LGCQkOy0=
+kernel.org/pub/linux/libs/security/libcap/cap v1.2.67/go.mod h1:GkntoBuwffz19qtdFVB+k2NtWNN+yCKnC/Ykv/hMiTU=
 kernel.org/pub/linux/libs/security/libcap/psx v1.2.67 h1:NxbXJ7pDVq0FKBsqjieT92QDXI2XaqH2HAi4QcCOHt8=
+kernel.org/pub/linux/libs/security/libcap/psx v1.2.67/go.mod h1:+l6Ee2F59XiJ2I6WR5ObpC1utCQJZ/VLsEbQCD8RG24=
 modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
 modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
 modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=

+ 7 - 7
vendor/github.com/containerd/console/.golangci.yml

@@ -1,16 +1,16 @@
 linters:
   enable:
-    - structcheck
-    - varcheck
-    - staticcheck
-    - unconvert
     - gofmt
     - goimports
-    - golint
     - ineffassign
-    - vet
-    - unused
     - misspell
+    - revive
+    - staticcheck
+    - structcheck
+    - unconvert
+    - unused
+    - varcheck
+    - vet
   disable:
     - errcheck
 

+ 3 - 3
vendor/github.com/containerd/console/README.md

@@ -22,8 +22,8 @@ current.Resize(ws)
 
 console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
 As a containerd sub-project, you will find the:
- * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
- * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
- * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
 
 information in our [`containerd/project`](https://github.com/containerd/project) repository.

+ 6 - 3
vendor/github.com/containerd/console/console.go

@@ -22,7 +22,10 @@ import (
 	"os"
 )
 
-var ErrNotAConsole = errors.New("provided file is not a console")
+var (
+	ErrNotAConsole    = errors.New("provided file is not a console")
+	ErrNotImplemented = errors.New("not implemented")
+)
 
 type File interface {
 	io.ReadWriteCloser
@@ -45,7 +48,7 @@ type Console interface {
 	SetRaw() error
 	// DisableEcho disables echo on the console
 	DisableEcho() error
-	// Reset restores the console to its orignal state
+	// Reset restores the console to its original state
 	Reset() error
 	// Size returns the window size of the console
 	Size() (WinSize, error)
@@ -78,7 +81,7 @@ func Current() (c Console) {
 }
 
 // ConsoleFromFile returns a console using the provided file
-// nolint:golint
+// nolint:revive
 func ConsoleFromFile(f File) (Console, error) {
 	if err := checkConsole(f); err != nil {
 		return nil, err
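For orientation, a minimal usage sketch of the console API shown above (`Current`, `SetRaw`, `Reset`, `Size`). This is illustrative only: `Current()` has no error return in the signature shown, so the sketch assumes the process is attached to a real terminal.

```go
package main

import (
	"log"

	"github.com/containerd/console"
)

func main() {
	// Current() returns the process console; the signature above has no
	// error return, so a real terminal is assumed to be attached.
	c := console.Current()
	defer c.Reset() // restore the original terminal state on exit

	if err := c.SetRaw(); err != nil {
		log.Fatal(err)
	}

	ws, err := c.Size()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("console size: %dx%d", ws.Width, ws.Height)
}
```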

+ 1 - 0
vendor/github.com/containerd/console/console_linux.go

@@ -1,3 +1,4 @@
+//go:build linux
 // +build linux
 
 /*

+ 36 - 0
vendor/github.com/containerd/console/console_other.go

@@ -0,0 +1,36 @@
+//go:build !darwin && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
+// +build !darwin,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+// NewPty creates a new pty pair
+// The master is returned as the first console and a string
+// with the path to the pty slave is returned as the second
+func NewPty() (Console, string, error) {
+	return nil, "", ErrNotImplemented
+}
+
+// checkConsole checks if the provided file is a console
+func checkConsole(f File) error {
+	return ErrNotAConsole
+}
+
+func newMaster(f File) (Console, error) {
+	return nil, ErrNotImplemented
+}

+ 2 - 1
vendor/github.com/containerd/console/console_unix.go

@@ -1,4 +1,5 @@
-// +build darwin freebsd linux netbsd openbsd solaris
+//go:build darwin || freebsd || linux || netbsd || openbsd || zos
+// +build darwin freebsd linux netbsd openbsd zos
 
 /*
    Copyright The containerd Authors.

+ 14 - 11
vendor/github.com/containerd/console/console_windows.go

@@ -24,12 +24,13 @@ import (
 	"golang.org/x/sys/windows"
 )
 
-var (
-	vtInputSupported  bool
-	ErrNotImplemented = errors.New("not implemented")
-)
+var vtInputSupported bool
 
 func (m *master) initStdios() {
+	// Note: We discard console mode warnings, because in/out can be redirected.
+	//
+	// TODO: Investigate opening CONOUT$/CONIN$ to handle this correctly
+
 	m.in = windows.Handle(os.Stdin.Fd())
 	if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil {
 		// Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
@@ -39,8 +40,6 @@ func (m *master) initStdios() {
 		// Unconditionally set the console mode back even on failure because SetConsoleMode
 		// remembers invalid bits on input handles.
 		windows.SetConsoleMode(m.in, m.inMode)
-	} else {
-		fmt.Printf("failed to get console mode for stdin: %v\n", err)
 	}
 
 	m.out = windows.Handle(os.Stdout.Fd())
@@ -50,8 +49,6 @@ func (m *master) initStdios() {
 		} else {
 			windows.SetConsoleMode(m.out, m.outMode)
 		}
-	} else {
-		fmt.Printf("failed to get console mode for stdout: %v\n", err)
 	}
 
 	m.err = windows.Handle(os.Stderr.Fd())
@@ -61,8 +58,6 @@ func (m *master) initStdios() {
 		} else {
 			windows.SetConsoleMode(m.err, m.errMode)
 		}
-	} else {
-		fmt.Printf("failed to get console mode for stderr: %v\n", err)
 	}
 }
 
@@ -94,6 +89,8 @@ func (m *master) SetRaw() error {
 }
 
 func (m *master) Reset() error {
+	var errs []error
+
 	for _, s := range []struct {
 		fd   windows.Handle
 		mode uint32
@@ -103,10 +100,16 @@ func (m *master) Reset() error {
 		{m.err, m.errMode},
 	} {
 		if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
-			return fmt.Errorf("unable to restore console mode: %w", err)
+			// we can't just abort on the first error, otherwise we might leave
+			// the console in an unexpected state.
+			errs = append(errs, fmt.Errorf("unable to restore console mode: %w", err))
 		}
 	}
 
+	if len(errs) > 0 {
+		return errs[0]
+	}
+
 	return nil
 }
 

+ 0 - 163
vendor/github.com/containerd/console/console_zos.go

@@ -1,163 +0,0 @@
-// +build zos
-
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package console
-
-import (
-	"fmt"
-	"os"
-
-	"golang.org/x/sys/unix"
-)
-
-// NewPty creates a new pty pair
-// The master is returned as the first console and a string
-// with the path to the pty slave is returned as the second
-func NewPty() (Console, string, error) {
-	var f File
-	var err error
-	var slave string
-	for i := 0;; i++ {
-		ptyp := fmt.Sprintf("/dev/ptyp%04d", i)
-		f, err = os.OpenFile(ptyp, os.O_RDWR, 0600)
-		if err == nil {
-			slave = fmt.Sprintf("/dev/ttyp%04d", i)
-			break
-		}
-		if os.IsNotExist(err) {
-			return nil, "", err
-		}
-		// else probably Resource Busy
-	}
-	m, err := newMaster(f)
-	if err != nil {
-		return nil, "", err
-	}
-	return m, slave, nil
-}
-
-type master struct {
-	f        File
-	original *unix.Termios
-}
-
-func (m *master) Read(b []byte) (int, error) {
-	return m.f.Read(b)
-}
-
-func (m *master) Write(b []byte) (int, error) {
-	return m.f.Write(b)
-}
-
-func (m *master) Close() error {
-	return m.f.Close()
-}
-
-func (m *master) Resize(ws WinSize) error {
-	return tcswinsz(m.f.Fd(), ws)
-}
-
-func (m *master) ResizeFrom(c Console) error {
-	ws, err := c.Size()
-	if err != nil {
-		return err
-	}
-	return m.Resize(ws)
-}
-
-func (m *master) Reset() error {
-	if m.original == nil {
-		return nil
-	}
-	return tcset(m.f.Fd(), m.original)
-}
-
-func (m *master) getCurrent() (unix.Termios, error) {
-	var termios unix.Termios
-	if err := tcget(m.f.Fd(), &termios); err != nil {
-		return unix.Termios{}, err
-	}
-	return termios, nil
-}
-
-func (m *master) SetRaw() error {
-	rawState, err := m.getCurrent()
-	if err != nil {
-		return err
-	}
-	rawState = cfmakeraw(rawState)
-	rawState.Oflag = rawState.Oflag | unix.OPOST
-	return tcset(m.f.Fd(), &rawState)
-}
-
-func (m *master) DisableEcho() error {
-	rawState, err := m.getCurrent()
-	if err != nil {
-		return err
-	}
-	rawState.Lflag = rawState.Lflag &^ unix.ECHO
-	return tcset(m.f.Fd(), &rawState)
-}
-
-func (m *master) Size() (WinSize, error) {
-	return tcgwinsz(m.f.Fd())
-}
-
-func (m *master) Fd() uintptr {
-	return m.f.Fd()
-}
-
-func (m *master) Name() string {
-	return m.f.Name()
-}
-
-// checkConsole checks if the provided file is a console
-func checkConsole(f File) error {
-	var termios unix.Termios
-	if tcget(f.Fd(), &termios) != nil {
-		return ErrNotAConsole
-	}
-	return nil
-}
-
-func newMaster(f File) (Console, error) {
-	m := &master{
-		f: f,
-	}
-	t, err := m.getCurrent()
-	if err != nil {
-		return nil, err
-	}
-	m.original = &t
-	return m, nil
-}
-
-// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
-// created by us acts normally. In particular, a not-very-well-known default of
-// Linux unix98 ptys is that they have +onlcr by default. While this isn't a
-// problem for terminal emulators, because we relay data from the terminal we
-// also relay that funky line discipline.
-func ClearONLCR(fd uintptr) error {
-	return setONLCR(fd, false)
-}
-
-// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
-// created by us acts as intended for a terminal emulator.
-func SetONLCR(fd uintptr) error {
-	return setONLCR(fd, true)
-}

+ 1 - 0
vendor/github.com/containerd/console/pty_freebsd_cgo.go

@@ -1,3 +1,4 @@
+//go:build freebsd && cgo
 // +build freebsd,cgo
 
 /*

+ 1 - 0
vendor/github.com/containerd/console/pty_freebsd_nocgo.go

@@ -1,3 +1,4 @@
+//go:build freebsd && !cgo
 // +build freebsd,!cgo
 
 /*

+ 2 - 1
vendor/github.com/containerd/console/pty_unix.go

@@ -1,4 +1,5 @@
-// +build darwin linux netbsd openbsd solaris
+//go:build darwin || linux || netbsd || openbsd
+// +build darwin linux netbsd openbsd
 
 /*
    Copyright The containerd Authors.

+ 43 - 0
vendor/github.com/containerd/console/pty_zos.go

@@ -0,0 +1,43 @@
+//go:build zos
+// +build zos
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package console
+
+import (
+	"fmt"
+	"os"
+)
+
+// openpt allocates a new pseudo-terminal by opening the first available /dev/ptypXX device
+func openpt() (*os.File, error) {
+	var f *os.File
+	var err error
+	for i := 0; ; i++ {
+		ptyp := fmt.Sprintf("/dev/ptyp%04d", i)
+		f, err = os.OpenFile(ptyp, os.O_RDWR, 0600)
+		if err == nil {
+			break
+		}
+		if os.IsNotExist(err) {
+			return nil, err
+		}
+		// else probably Resource Busy
+	}
+	return f, nil
+}

+ 1 - 0
vendor/github.com/containerd/console/tc_freebsd_cgo.go

@@ -1,3 +1,4 @@
+//go:build freebsd && cgo
 // +build freebsd,cgo
 
 /*

+ 1 - 0
vendor/github.com/containerd/console/tc_freebsd_nocgo.go

@@ -1,3 +1,4 @@
+//go:build freebsd && !cgo
 // +build freebsd,!cgo
 
 /*

+ 1 - 0
vendor/github.com/containerd/console/tc_openbsd_cgo.go

@@ -1,3 +1,4 @@
+//go:build openbsd && cgo
 // +build openbsd,cgo
 
 /*

+ 1 - 0
vendor/github.com/containerd/console/tc_openbsd_nocgo.go

@@ -1,3 +1,4 @@
+//go:build openbsd && !cgo
 // +build openbsd,!cgo
 
 /*

+ 0 - 51
vendor/github.com/containerd/console/tc_solaris_cgo.go

@@ -1,51 +0,0 @@
-// +build solaris,cgo
-
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package console
-
-import (
-	"os"
-
-	"golang.org/x/sys/unix"
-)
-
-//#include <stdlib.h>
-import "C"
-
-const (
-	cmdTcGet = unix.TCGETS
-	cmdTcSet = unix.TCSETS
-)
-
-// ptsname retrieves the name of the first available pts for the given master.
-func ptsname(f *os.File) (string, error) {
-	ptspath, err := C.ptsname(C.int(f.Fd()))
-	if err != nil {
-		return "", err
-	}
-	return C.GoString(ptspath), nil
-}
-
-// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
-// unlockpt should be called before opening the slave side of a pty.
-func unlockpt(f *os.File) error {
-	if _, err := C.grantpt(C.int(f.Fd())); err != nil {
-		return err
-	}
-	return nil
-}

+ 0 - 47
vendor/github.com/containerd/console/tc_solaris_nocgo.go

@@ -1,47 +0,0 @@
-// +build solaris,!cgo
-
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-//
-// Implementing the functions below requires cgo support.  Non-cgo stubs
-// versions are defined below to enable cross-compilation of source code
-// that depends on these functions, but the resultant cross-compiled
-// binaries cannot actually be used.  If the stub function(s) below are
-// actually invoked they will display an error message and cause the
-// calling process to exit.
-//
-
-package console
-
-import (
-	"os"
-
-	"golang.org/x/sys/unix"
-)
-
-const (
-	cmdTcGet = unix.TCGETS
-	cmdTcSet = unix.TCSETS
-)
-
-func ptsname(f *os.File) (string, error) {
-	panic("ptsname() support requires cgo.")
-}
-
-func unlockpt(f *os.File) error {
-	panic("unlockpt() support requires cgo.")
-}

+ 3 - 2
vendor/github.com/containerd/console/tc_unix.go

@@ -1,4 +1,5 @@
-// +build darwin freebsd linux netbsd openbsd solaris zos
+//go:build darwin || freebsd || linux || netbsd || openbsd || zos
+// +build darwin freebsd linux netbsd openbsd zos
 
 /*
    Copyright The containerd Authors.
@@ -83,7 +84,7 @@ func cfmakeraw(t unix.Termios) unix.Termios {
 	t.Oflag &^= unix.OPOST
 	t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
 	t.Cflag &^= (unix.CSIZE | unix.PARENB)
-	t.Cflag &^= unix.CS8
+	t.Cflag |= unix.CS8
 	t.Cc[unix.VMIN] = 1
 	t.Cc[unix.VTIME] = 0
 

+ 13 - 0
vendor/github.com/containerd/console/tc_zos.go

@@ -17,6 +17,9 @@
 package console
 
 import (
+	"os"
+	"strings"
+
 	"golang.org/x/sys/unix"
 )
 
@@ -24,3 +27,13 @@ const (
 	cmdTcGet = unix.TCGETS
 	cmdTcSet = unix.TCSETS
 )
+
+// unlockpt is a no-op on zos.
+func unlockpt(_ *os.File) error {
+	return nil
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	return "/dev/ttyp" + strings.TrimPrefix(f.Name(), "/dev/ptyp"), nil
+}

+ 1 - 2
vendor/github.com/containerd/stargz-snapshotter/estargz/build.go

@@ -436,9 +436,8 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
 		if err != nil {
 			if err == io.EOF {
 				break
-			} else {
-				return nil, fmt.Errorf("failed to parse tar file, %w", err)
 			}
+			return nil, fmt.Errorf("failed to parse tar file, %w", err)
 		}
 		switch cleanEntryName(h.Name) {
 		case PrefetchLandmark, NoPrefetchLandmark:

+ 201 - 0
vendor/github.com/containernetworking/plugins/LICENSE

@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 41 - 0
vendor/github.com/containernetworking/plugins/pkg/ns/README.md

@@ -0,0 +1,41 @@
+### Namespaces, Threads, and Go
+On Linux each OS thread can have a different network namespace.  Go's thread scheduling model switches goroutines between OS threads based on OS thread load and whether the goroutine would block other goroutines.  This can result in a goroutine switching network namespaces without notice and lead to errors in your code.
+
+### Namespace Switching
+Switching namespaces with the `ns.Set()` method is not recommended without additional strategies to prevent unexpected namespace changes when your goroutines switch OS threads.
+
+Go provides the `runtime.LockOSThread()` function to ensure a specific goroutine executes on its current OS thread and prevents any other goroutine from running in that thread until the locked one exits.  Careful usage of `LockOSThread()` and goroutines can provide good control over which network namespace a given goroutine executes in.
+
+For example, you cannot rely on the `ns.Set()` namespace being the current namespace after the `Set()` call unless you do two things.  First, the goroutine calling `Set()` must have previously called `LockOSThread()`.  Second, you must ensure `runtime.UnlockOSThread()` is not called somewhere in-between.  You also cannot rely on the initial network namespace remaining the current network namespace if any other code in your program switches namespaces, unless you have already called `LockOSThread()` in that goroutine.  Note that `LockOSThread()` prevents the Go scheduler from optimally scheduling goroutines for best performance, so `LockOSThread()` should only be used in small, isolated goroutines that release the lock quickly.
+
+### Do() The Recommended Thing
+The `ns.Do()` method provides **partial** control over network namespaces for you by implementing these strategies. All code dependent on a particular network namespace (including the root namespace) should be wrapped in the `ns.Do()` method to ensure the correct namespace is selected for the duration of your code.  For example:
+
+```go
+err = targetNs.Do(func(hostNs ns.NetNS) error {
+	dummy := &netlink.Dummy{
+		LinkAttrs: netlink.LinkAttrs{
+			Name: "dummy0",
+		},
+	}
+	return netlink.LinkAdd(dummy)
+})
+```
+
+Note this requirement to wrap every network call is very onerous - any libraries you call might call out to network services such as DNS, and all such calls need to be protected after you call `ns.Do()`. All goroutines spawned from within the `ns.Do` will not inherit the new namespace. The CNI plugins all exit very soon after calling `ns.Do()` which helps to minimize the problem.
+
+When a new thread is spawned in Linux, it inherits the namespace of its parent. In versions of go **prior to 1.10**, if the runtime spawns a new OS thread, it picks the parent randomly. If the chosen parent thread has been moved to a new namespace (even temporarily), the new OS thread will be permanently "stuck in the wrong namespace", and goroutines will non-deterministically switch namespaces as they are rescheduled.
+
+In short, **there was no safe way to change network namespaces, even temporarily, from within a long-lived, multithreaded Go process**. If you wish to do this, you must use go 1.10 or greater. 
+
+
+### Creating network namespaces
+Earlier versions of this library managed namespace creation, but as CNI does not actually utilize this feature (and it was essentially unmaintained), it was removed. If you're writing a container runtime, you should implement namespace management yourself. However, there are some gotchas when doing so, especially around handling `/var/run/netns`. A reasonably correct reference implementation, borrowed from `rkt`, can be found in `pkg/testutils/netns_linux.go` if you're in need of a source of inspiration.
+
+
+### Further Reading
+ - https://github.com/golang/go/wiki/LockOSThread
+ - http://morsmachine.dk/go-scheduler
+ - https://github.com/containernetworking/cni/issues/262
+ - https://golang.org/pkg/runtime/
+ - https://www.weave.works/blog/linux-namespaces-and-go-don-t-mix
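The package also vendors a `WithNetNSPath` convenience wrapper (defined in `ns_linux.go`, shown further down in this diff) that applies the `Do()` pattern described above to a namespace addressed by path. A minimal sketch; the namespace path and the interface listing are hypothetical, illustrative choices:

```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/containernetworking/plugins/pkg/ns"
)

func main() {
	// Hypothetical path: namespaces are normally created and mounted here
	// by the container runtime, not by this library.
	const nsPath = "/var/run/netns/example"

	err := ns.WithNetNSPath(nsPath, func(hostNS ns.NetNS) error {
		// Runs on a locked OS thread inside the target namespace;
		// hostNS is the namespace the call started from.
		ifaces, err := net.Interfaces()
		if err != nil {
			return err
		}
		for _, iface := range ifaces {
			fmt.Println(iface.Name)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```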

+ 234 - 0
vendor/github.com/containernetworking/plugins/pkg/ns/ns_linux.go

@@ -0,0 +1,234 @@
+// Copyright 2015-2017 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ns
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+	"sync"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// Returns an object representing the current OS thread's network namespace
+func GetCurrentNS() (NetNS, error) {
+	// Lock the thread in case other goroutine executes in it and changes its
+	// network namespace after getCurrentThreadNetNSPath(), otherwise it might
+	// return an unexpected network namespace.
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+	return GetNS(getCurrentThreadNetNSPath())
+}
+
+func getCurrentThreadNetNSPath() string {
+	// /proc/self/ns/net returns the namespace of the main thread, not
+	// of whatever thread this goroutine is running on.  Make sure we
+	// use the thread's net namespace since the thread is switching around
+	return fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), unix.Gettid())
+}
+
+func (ns *netNS) Close() error {
+	if err := ns.errorIfClosed(); err != nil {
+		return err
+	}
+
+	if err := ns.file.Close(); err != nil {
+		return fmt.Errorf("Failed to close %q: %v", ns.file.Name(), err)
+	}
+	ns.closed = true
+
+	return nil
+}
+
+func (ns *netNS) Set() error {
+	if err := ns.errorIfClosed(); err != nil {
+		return err
+	}
+
+	if err := unix.Setns(int(ns.Fd()), unix.CLONE_NEWNET); err != nil {
+		return fmt.Errorf("Error switching to ns %v: %v", ns.file.Name(), err)
+	}
+
+	return nil
+}
+
+type NetNS interface {
+	// Executes the passed closure in this object's network namespace,
+	// attempting to restore the original namespace before returning.
+	// However, since each OS thread can have a different network namespace,
+	// and Go's thread scheduling is highly variable, callers cannot
+	// guarantee any specific namespace is set unless operations that
+	// require that namespace are wrapped with Do().  Also, no code called
+	// from Do() should call runtime.UnlockOSThread(), or the risk
+	// of executing code in an incorrect namespace will be greater.  See
+	// https://github.com/golang/go/wiki/LockOSThread for further details.
+	Do(toRun func(NetNS) error) error
+
+	// Sets the current network namespace to this object's network namespace.
+	// Note that since Go's thread scheduling is highly variable, callers
+	// cannot guarantee the requested namespace will be the current namespace
+	// after this function is called; to ensure this wrap operations that
+	// require the namespace with Do() instead.
+	Set() error
+
+	// Returns the filesystem path representing this object's network namespace
+	Path() string
+
+	// Returns a file descriptor representing this object's network namespace
+	Fd() uintptr
+
+	// Cleans up this instance of the network namespace; if this instance
+	// is the last user the namespace will be destroyed
+	Close() error
+}
+
+type netNS struct {
+	file   *os.File
+	closed bool
+}
+
+// netNS implements the NetNS interface
+var _ NetNS = &netNS{}
+
+const (
+	// https://github.com/torvalds/linux/blob/master/include/uapi/linux/magic.h
+	NSFS_MAGIC   = unix.NSFS_MAGIC
+	PROCFS_MAGIC = unix.PROC_SUPER_MAGIC
+)
+
+type NSPathNotExistErr struct{ msg string }
+
+func (e NSPathNotExistErr) Error() string { return e.msg }
+
+type NSPathNotNSErr struct{ msg string }
+
+func (e NSPathNotNSErr) Error() string { return e.msg }
+
+func IsNSorErr(nspath string) error {
+	stat := syscall.Statfs_t{}
+	if err := syscall.Statfs(nspath, &stat); err != nil {
+		if os.IsNotExist(err) {
+			err = NSPathNotExistErr{msg: fmt.Sprintf("failed to Statfs %q: %v", nspath, err)}
+		} else {
+			err = fmt.Errorf("failed to Statfs %q: %v", nspath, err)
+		}
+		return err
+	}
+
+	switch stat.Type {
+	case PROCFS_MAGIC, NSFS_MAGIC:
+		return nil
+	default:
+		return NSPathNotNSErr{msg: fmt.Sprintf("unknown FS magic on %q: %x", nspath, stat.Type)}
+	}
+}
+
+// Returns an object representing the namespace referred to by @path
+func GetNS(nspath string) (NetNS, error) {
+	err := IsNSorErr(nspath)
+	if err != nil {
+		return nil, err
+	}
+
+	fd, err := os.Open(nspath)
+	if err != nil {
+		return nil, err
+	}
+
+	return &netNS{file: fd}, nil
+}
+
+func (ns *netNS) Path() string {
+	return ns.file.Name()
+}
+
+func (ns *netNS) Fd() uintptr {
+	return ns.file.Fd()
+}
+
+func (ns *netNS) errorIfClosed() error {
+	if ns.closed {
+		return fmt.Errorf("%q has already been closed", ns.file.Name())
+	}
+	return nil
+}
+
+func (ns *netNS) Do(toRun func(NetNS) error) error {
+	if err := ns.errorIfClosed(); err != nil {
+		return err
+	}
+
+	containedCall := func(hostNS NetNS) error {
+		threadNS, err := GetCurrentNS()
+		if err != nil {
+			return fmt.Errorf("failed to open current netns: %v", err)
+		}
+		defer threadNS.Close()
+
+		// switch to target namespace
+		if err = ns.Set(); err != nil {
+			return fmt.Errorf("error switching to ns %v: %v", ns.file.Name(), err)
+		}
+		defer func() {
+			err := threadNS.Set() // switch back
+			if err == nil {
+				// Unlock the current thread only when we successfully switched back
+				// to the original namespace; otherwise leave the thread locked which
+				// will force the runtime to scrap the current thread, that is maybe
+				// not as optimal but at least always safe to do.
+				runtime.UnlockOSThread()
+			}
+		}()
+
+		return toRun(hostNS)
+	}
+
+	// save a handle to current network namespace
+	hostNS, err := GetCurrentNS()
+	if err != nil {
+		return fmt.Errorf("Failed to open current namespace: %v", err)
+	}
+	defer hostNS.Close()
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	// Start the callback in a new green thread so that if we later fail
+	// to switch the namespace back to the original one, we can safely
+	// leave the thread locked to die without a risk of the current thread
+	// left lingering with incorrect namespace.
+	var innerError error
+	go func() {
+		defer wg.Done()
+		runtime.LockOSThread()
+		innerError = containedCall(hostNS)
+	}()
+	wg.Wait()
+
+	return innerError
+}
+
+// WithNetNSPath executes the passed closure under the given network
+// namespace, restoring the original namespace afterwards.
+func WithNetNSPath(nspath string, toRun func(NetNS) error) error {
+	ns, err := GetNS(nspath)
+	if err != nil {
+		return err
+	}
+	defer ns.Close()
+	return ns.Do(toRun)
+}

+ 0 - 34
vendor/github.com/docker/distribution/reference/helpers_deprecated.go

@@ -1,34 +0,0 @@
-package reference
-
-import "github.com/distribution/reference"
-
-// IsNameOnly returns true if reference only contains a repo name.
-//
-// Deprecated: use [reference.IsNameOnly].
-func IsNameOnly(ref reference.Named) bool {
-	return reference.IsNameOnly(ref)
-}
-
-// FamiliarName returns the familiar name string
-// for the given named, familiarizing if needed.
-//
-// Deprecated: use [reference.FamiliarName].
-func FamiliarName(ref reference.Named) string {
-	return reference.FamiliarName(ref)
-}
-
-// FamiliarString returns the familiar string representation
-// for the given reference, familiarizing if needed.
-//
-// Deprecated: use [reference.FamiliarString].
-func FamiliarString(ref reference.Reference) string {
-	return reference.FamiliarString(ref)
-}
-
-// FamiliarMatch reports whether ref matches the specified pattern.
-// See [path.Match] for supported patterns.
-//
-// Deprecated: use [reference.FamiliarMatch].
-func FamiliarMatch(pattern string, ref reference.Reference) (bool, error) {
-	return reference.FamiliarMatch(pattern, ref)
-}

+ 0 - 92
vendor/github.com/docker/distribution/reference/normalize_deprecated.go

@@ -1,92 +0,0 @@
-package reference
-
-import (
-	"regexp"
-
-	"github.com/distribution/reference"
-	"github.com/opencontainers/go-digest"
-	"github.com/opencontainers/go-digest/digestset"
-)
-
-// ParseNormalizedNamed parses a string into a named reference
-// transforming a familiar name from Docker UI to a fully
-// qualified reference. If the value may be an identifier
-// use ParseAnyReference.
-//
-// Deprecated: use [reference.ParseNormalizedNamed].
-func ParseNormalizedNamed(s string) (reference.Named, error) {
-	return reference.ParseNormalizedNamed(s)
-}
-
-// ParseDockerRef normalizes the image reference following the docker convention,
-// which allows for references to contain both a tag and a digest.
-//
-// Deprecated: use [reference.ParseDockerRef].
-func ParseDockerRef(ref string) (reference.Named, error) {
-	return reference.ParseDockerRef(ref)
-}
-
-// TagNameOnly adds the default tag "latest" to a reference if it only has
-// a repo name.
-//
-// Deprecated: use [reference.TagNameOnly].
-func TagNameOnly(ref reference.Named) reference.Named {
-	return reference.TagNameOnly(ref)
-}
-
-// ParseAnyReference parses a reference string as a possible identifier,
-// full digest, or familiar name.
-//
-// Deprecated: use [reference.ParseAnyReference].
-func ParseAnyReference(ref string) (reference.Reference, error) {
-	return reference.ParseAnyReference(ref)
-}
-
-// Functions and types below have been removed in distribution v3 and
-// have not been ported to github.com/distribution/reference. See
-// https://github.com/distribution/distribution/pull/3774
-
-var (
-	// ShortIdentifierRegexp is the format used to represent a prefix
-	// of an identifier. A prefix may be used to match a sha256 identifier
-	// within a list of trusted identifiers.
-	//
-	// Deprecated: support for short-identifiers is deprecated, and will be removed in v3.
-	ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier)
-
-	shortIdentifier = `([a-f0-9]{6,64})`
-
-	// anchoredShortIdentifierRegexp is used to check if a value
-	// is a possible identifier prefix, anchored at start and end
-	// of string.
-	anchoredShortIdentifierRegexp = regexp.MustCompile(`^` + shortIdentifier + `$`)
-)
-
-type digestReference digest.Digest
-
-func (d digestReference) String() string {
-	return digest.Digest(d).String()
-}
-
-func (d digestReference) Digest() digest.Digest {
-	return digest.Digest(d)
-}
-
-// ParseAnyReferenceWithSet parses a reference string as a possible short
-// identifier to be matched in a digest set, a full digest, or familiar name.
-//
-// Deprecated: support for short-identifiers is deprecated, and will be removed in v3.
-func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
-	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
-		dgst, err := ds.Lookup(ref)
-		if err == nil {
-			return digestReference(dgst), nil
-		}
-	} else {
-		if dgst, err := digest.Parse(ref); err == nil {
-			return digestReference(dgst), nil
-		}
-	}
-
-	return reference.ParseNormalizedNamed(ref)
-}

+ 0 - 172
vendor/github.com/docker/distribution/reference/reference_deprecated.go

@@ -1,172 +0,0 @@
-// Package reference is deprecated, and has moved to github.com/distribution/reference.
-//
-// Deprecated: use github.com/distribution/reference instead.
-package reference
-
-import (
-	"github.com/distribution/reference"
-	"github.com/opencontainers/go-digest"
-)
-
-const (
-	// NameTotalLengthMax is the maximum total number of characters in a repository name.
-	//
-	// Deprecated: use [reference.NameTotalLengthMax].
-	NameTotalLengthMax = reference.NameTotalLengthMax
-)
-
-var (
-	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
-	//
-	// Deprecated: use [reference.ErrReferenceInvalidFormat].
-	ErrReferenceInvalidFormat = reference.ErrReferenceInvalidFormat
-
-	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
-	//
-	// Deprecated: use [reference.ErrTagInvalidFormat].
-	ErrTagInvalidFormat = reference.ErrTagInvalidFormat
-
-	// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
-	//
-	// Deprecated: use [reference.ErrDigestInvalidFormat].
-	ErrDigestInvalidFormat = reference.ErrDigestInvalidFormat
-
-	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
-	//
-	// Deprecated: use [reference.ErrNameContainsUppercase].
-	ErrNameContainsUppercase = reference.ErrNameContainsUppercase
-
-	// ErrNameEmpty is returned for empty, invalid repository names.
-	//
-	// Deprecated: use [reference.ErrNameEmpty].
-	ErrNameEmpty = reference.ErrNameEmpty
-
-	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
-	//
-	// Deprecated: use [reference.ErrNameTooLong].
-	ErrNameTooLong = reference.ErrNameTooLong
-
-	// ErrNameNotCanonical is returned when a name is not canonical.
-	//
-	// Deprecated: use [reference.ErrNameNotCanonical].
-	ErrNameNotCanonical = reference.ErrNameNotCanonical
-)
-
-// Reference is an opaque object reference identifier that may include
-// modifiers such as a hostname, name, tag, and digest.
-//
-// Deprecated: use [reference.Reference].
-type Reference = reference.Reference
-
-// Field provides a wrapper type for resolving correct reference types when
-// working with encoding.
-//
-// Deprecated: use [reference.Field].
-type Field = reference.Field
-
-// AsField wraps a reference in a Field for encoding.
-//
-// Deprecated: use [reference.AsField].
-func AsField(ref reference.Reference) reference.Field {
-	return reference.AsField(ref)
-}
-
-// Named is an object with a full name
-//
-// Deprecated: use [reference.Named].
-type Named = reference.Named
-
-// Tagged is an object which has a tag
-//
-// Deprecated: use [reference.Tagged].
-type Tagged = reference.Tagged
-
-// NamedTagged is an object including a name and tag.
-//
-// Deprecated: use [reference.NamedTagged].
-type NamedTagged reference.NamedTagged
-
-// Digested is an object which has a digest
-// in which it can be referenced by
-//
-// Deprecated: use [reference.Digested].
-type Digested reference.Digested
-
-// Canonical reference is an object with a fully unique
-// name including a name with domain and digest
-//
-// Deprecated: use [reference.Canonical].
-type Canonical reference.Canonical
-
-// Domain returns the domain part of the [Named] reference.
-//
-// Deprecated: use [reference.Domain].
-func Domain(named reference.Named) string {
-	return reference.Domain(named)
-}
-
-// Path returns the name without the domain part of the [Named] reference.
-//
-// Deprecated: use [reference.Path].
-func Path(named reference.Named) (name string) {
-	return reference.Path(named)
-}
-
-// SplitHostname splits a named reference into a
-// hostname and name string. If no valid hostname is
-// found, the hostname is empty and the full value
-// is returned as name
-//
-// Deprecated: Use [reference.Domain] or [reference.Path].
-func SplitHostname(named reference.Named) (string, string) {
-	return reference.SplitHostname(named)
-}
-
-// Parse parses s and returns a syntactically valid Reference.
-// If an error was encountered it is returned, along with a nil Reference.
-//
-// Deprecated: use [reference.Parse].
-func Parse(s string) (reference.Reference, error) {
-	return reference.Parse(s)
-}
-
-// ParseNamed parses s and returns a syntactically valid reference implementing
-// the Named interface. The reference must have a name and be in the canonical
-// form, otherwise an error is returned.
-// If an error was encountered it is returned, along with a nil Reference.
-//
-// Deprecated: use [reference.ParseNamed].
-func ParseNamed(s string) (reference.Named, error) {
-	return reference.ParseNamed(s)
-}
-
-// WithName returns a named object representing the given string. If the input
-// is invalid ErrReferenceInvalidFormat will be returned.
-//
-// Deprecated: use [reference.WithName].
-func WithName(name string) (reference.Named, error) {
-	return reference.WithName(name)
-}
-
-// WithTag combines the name from "name" and the tag from "tag" to form a
-// reference incorporating both the name and the tag.
-//
-// Deprecated: use [reference.WithTag].
-func WithTag(name reference.Named, tag string) (reference.NamedTagged, error) {
-	return reference.WithTag(name, tag)
-}
-
-// WithDigest combines the name from "name" and the digest from "digest" to form
-// a reference incorporating both the name and the digest.
-//
-// Deprecated: use [reference.WithDigest].
-func WithDigest(name reference.Named, digest digest.Digest) (reference.Canonical, error) {
-	return reference.WithDigest(name, digest)
-}
-
-// TrimNamed removes any tag or digest from the named reference.
-//
-// Deprecated: use [reference.TrimNamed].
-func TrimNamed(ref reference.Named) reference.Named {
-	return reference.TrimNamed(ref)
-}
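Every symbol deleted from this file was a thin forwarding alias whose Deprecated comment points at the same name in github.com/distribution/reference. A minimal sketch of a caller that imports the replacement module directly; the image reference and output handling are illustrative, not taken from this change:

	// Hypothetical caller, migrated off the deprecated wrappers removed above.
	package main

	import (
		"fmt"

		"github.com/distribution/reference"
	)

	func main() {
		// ParseNamed requires the canonical form, as the deleted wrapper documented.
		named, err := reference.ParseNamed("docker.io/library/alpine:3.19")
		if err != nil {
			fmt.Println("parse error:", err)
			return
		}
		// Domain and Path replace the deprecated wrappers of the same names.
		fmt.Println(reference.Domain(named), reference.Path(named)) // docker.io library/alpine
	}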

+ 0 - 50
vendor/github.com/docker/distribution/reference/regexp_deprecated.go

@@ -1,50 +0,0 @@
-package reference
-
-import (
-	"github.com/distribution/reference"
-)
-
-// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
-//
-// Deprecated: use [reference.DigestRegexp].
-var DigestRegexp = reference.DigestRegexp
-
-// DomainRegexp matches hostname or IP-addresses, optionally including a port
-// number. It defines the structure of potential domain components that may be
-// part of image names. This is purposely a subset of what is allowed by DNS to
-// ensure backwards compatibility with Docker image names. It may be a subset of
-// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between
-// square brackets (excluding zone identifiers as defined by [RFC 6874] or special
-// addresses such as IPv4-Mapped).
-//
-// Deprecated: use [reference.DomainRegexp].
-//
-// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874.
-var DomainRegexp = reference.DomainRegexp
-
-// IdentifierRegexp is the format for a string identifier used as a
-// content-addressable identifier using sha256. These identifiers
-// are like digests without the algorithm, since sha256 is used.
-//
-// Deprecated: use [reference.IdentifierRegexp].
-var IdentifierRegexp = reference.IdentifierRegexp
-
-// NameRegexp is the format for the name component of references, including
-// an optional domain and port, but without tag or digest suffix.
-//
-// Deprecated: use [reference.NameRegexp].
-var NameRegexp = reference.NameRegexp
-
-// ReferenceRegexp is the full supported format of a reference. The regexp
-// is anchored and has capturing groups for name, tag, and digest
-// components.
-//
-// Deprecated: use [reference.ReferenceRegexp].
-var ReferenceRegexp = reference.ReferenceRegexp
-
-// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go].
-//
-// Deprecated: use [reference.TagRegexp].
-//
-// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28
-var TagRegexp = reference.TagRegexp
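These regexp aliases forward to the same-named variables in github.com/distribution/reference. A hedged fragment showing how ReferenceRegexp's capturing groups (name, tag, digest, per the comment above) can be consumed from the new package; it assumes `import "github.com/distribution/reference"`, and the input string is made up:

	// splitRef is an illustrative helper, not part of either package.
	func splitRef(s string) (name, tag, digest string, ok bool) {
		m := reference.ReferenceRegexp.FindStringSubmatch(s)
		if m == nil {
			return "", "", "", false
		}
		// Capturing groups: 1=name, 2=tag, 3=digest (empty when the component is absent).
		return m[1], m[2], m[3], true
	}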

+ 0 - 10
vendor/github.com/docker/distribution/reference/sort_deprecated.go

@@ -1,10 +0,0 @@
-package reference
-
-import "github.com/distribution/reference"
-
-// Sort sorts string references preferring higher information references.
-//
-// Deprecated: use [reference.Sort].
-func Sort(references []string) []string {
-	return reference.Sort(references)
-}

+ 220 - 2
vendor/github.com/moby/buildkit/AUTHORS

@@ -1,66 +1,284 @@
 # This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `scripts/generate-authors.sh`.
+# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
 
+a-palchikov <deemok@gmail.com>
 Aaron L. Xu <likexu@harmonycloud.cn>
 Aaron Lehmann <aaron.lehmann@docker.com>
+Aaron Lehmann <alehmann@netflix.com>
+Abdur Rehman <abdur_rehman@mentor.com>
+Addam Hardy <addam.hardy@gmail.com>
+Adrian Plata <adrian.plata@docker.com>
+Aidan Hobson Sayers <aidanhs@cantab.net>
 Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
+Alan Fregtman <941331+darkvertex@users.noreply.github.com>
+Alex Couture-Beil <alex@earthly.dev>
+Alex Mayer <amayer5125@gmail.com>
+Alex Suraci <suraci.alex@gmail.com>
 Alexander Morozov <lk4d4@docker.com>
+Alexis Murzeau <amubtdx@gmail.com>
 Alice Frosi <afrosi@de.ibm.com>
 Allen Sun <allen.sun@daocloud.io>
+Amen Belayneh <amenbelayneh@gmail.com>
+Anca Iordache <anca.iordache@docker.com>
 Anda Xu <anda.xu@docker.com>
+Anders F Björklund <anders.f.bjorklund@gmail.com>
+Andrea Bolognani <abologna@redhat.com>
+Andrea Luzzardi <aluzzardi@gmail.com>
+Andrew Chang <chang331006@gmail.com>
+Andrey Smirnov <smirnov.andrey@gmail.com>
+Andy Alt <andy5995@users.noreply.github.com>
+Andy Caldwell <andrew.caldwell@metaswitch.com>
+Ankush Agarwal <ankushagarwal11@gmail.com>
 Anthony Sottile <asottile@umich.edu>
+Anurag Goel <anurag@render.com>
+Anusha Ragunathan <anusha@docker.com>
 Arnaud Bailly <arnaud.oqube@gmail.com>
+Avi Deitcher <avi@deitcher.net>
+Bastiaan Bakker <bbakker@xebia.com>
+Ben Longo <benlongo9807@gmail.com>
+Bertrand Paquet <bertrand.paquet@gmail.com>
 Bin Liu <liubin0329@gmail.com>
+Brandon Mitchell <git@bmitch.net>
 Brian Goff <cpuguy83@gmail.com>
+Ce Gao <ce.gao@outlook.com>
+Chaerim Yeo <yeochaerim@gmail.com>
+Changwei Ge <gechangwei@bytedance.com>
+Chanhun Jeong <chanhun.jeong@navercorp.com>
+ChaosGramer <ChaosGramer@users.noreply.github.com>
+Charles Chan <charleswhchan@users.noreply.github.com>
+Charles Korn <me@charleskorn.com>
+Charles Law <claw@conduce.com>
+Chenbin <chen.bin11@zte.com.cn>
+Chris Goller <goller@gmail.com>
+Chris McKinnel <chrismckinnel@gmail.com>
+Christian Höltje <docwhat@gerf.org>
+Christian Weichel <chris@gitpod.io>
+Ciro S. Costa <cscosta@pivotal.io>
+Claudiu Belu <cbelu@cloudbasesolutions.com>
+Colin Chartier <colin.n.chartier@gmail.com>
+Corey Larson <corey@earthly.dev>
+Cory Bennett <cbennett@netflix.com>
+Cory Snider <csnider@mirantis.com>
+coryb <cbennett@netflix.com>
+CrazyMax <github@crazymax.dev>
+Csaba Apagyi <csaba.apagyi@gmail.com>
+Dan Duvall <dduvall@wikimedia.org>
+Daniel Cassidy <mail@danielcassidy.me.uk>
 Daniel Nephin <dnephin@gmail.com>
+Darren Shepherd <darren@rancher.com>
 Dave Chen <dave.chen@arm.com>
+Dave Henderson <dhenderson@gmail.com>
+Dave Tucker <dt@docker.com>
 David Calavera <david.calavera@gmail.com>
+David Dooling <dooling@gmail.com>
+David Gageot <david.gageot@docker.com>
+David Karlsson <david.karlsson@docker.com>
+Davis Schirmer <djds@bghost.xyz>
 Dennis Chen <dennis.chen@arm.com>
+dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
 Derek McGowan <derek@mcgstyle.net>
+Dharmit Shah <shahdharmit@gmail.com>
+Ding Fei <dingfei@stars.org.cn>
+dito <itodaisuke00@gmail.com>
 Doug Davis <dug@us.ibm.com>
-Edgar Lee <edgarl@netflix.com>
+Edgar Lee <edgarhinshunlee@gmail.com>
 Eli Uriegas <eli.uriegas@docker.com>
+Elias Faxö <elias.faxo@tre.se>
+Eng Zer Jun <engzerjun@gmail.com>
+Eric Engestrom <eric@engestrom.ch>
+Erik Sipsma <erik@sipsma.dev>
+eyherabh <hugogabriel.eyherabide@gmail.com>
 f0 <f0@users.noreply.github.com>
 Fernando Miguel <github@FernandoMiguel.net>
+Fiona Klute <fiona.klute@gmx.de>
+Foysal Iqbal <foysal.iqbal.fb@gmail.com>
+Fred Cox <mcfedr@gmail.com>
+Frieder Bluemle <frieder.bluemle@gmail.com>
+Gabriel <samfiragabriel@gmail.com>
+Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
+Gaetan de Villele <gdevillele@gmail.com>
+Gahl Saraf <saraf.gahl@gmail.com>
+genglu.gl <luzigeng32@163.com>
+George <george@betterde.com>
+ggjulio <juligonz@student.42.fr>
+Govind Rai <raigovind93@gmail.com>
+Grant Reaber <grant.reaber@gmail.com>
+Guilhem C <guilhem.charles@gmail.com>
+Hans van den Bogert <hansbogert@gmail.com>
 Hao Hu <hao.hu.fr@gmail.com>
+Hector S <hfsam88@gmail.com>
 Helen Xie <chenjg@harmonycloud.cn>
 Himanshu Pandey <hpandey@pivotal.io>
 Hiromu Nakamura <abctail30@gmail.com>
+HowJMay <vulxj0j8j8@gmail.com>
+Hugo Santos <hugo@namespacelabs.com>
 Ian Campbell <ijc@docker.com>
+Ilya Dmitrichenko <errordeveloper@gmail.com>
 Iskander (Alex) Sharipov <quasilyte@gmail.com>
+Jacob Gillespie <jacobwgillespie@gmail.com>
+Jacob MacElroy <jacob@okteto.com>
 Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
+Jeffrey Huang <jeffreyhuang23@gmail.com>
+Jesse Rittner <rittneje@gmail.com>
 Jessica Frazelle <acidburn@microsoft.com>
+jgeiger <jgeiger@gmail.com>
+Jitender Kumar <jitender.kumar@intel.com>
+jlecordier <jeanlecordier@hotmail.fr>
+joey <zchengjoey@gmail.com>
 John Howard <jhoward@microsoft.com>
+John Maguire <jmaguire@duosecurity.com>
+John Mulhausen <john@docker.com>
+John Tims <john.k.tims@gmail.com>
+Jon Zeolla <zeolla@gmail.com>
+Jonathan Azoff <azoff@users.noreply.github.com>
+Jonathan Giannuzzi <jonathan@giannuzzi.me>
 Jonathan Stoppani <jonathan.stoppani@divio.com>
+Jonny Stoten <jonny.stoten@docker.com>
+JordanGoasdoue <jordan.goasdoue@dailymotion.com>
+jroenf <jeroenfranse@gmail.com>
+Julian Goede <julian.goede@pm.me>
 Justas Brazauskas <brazauskasjustas@gmail.com>
+Justin Chadwell <me@jedevc.com>
 Justin Cormack <justin.cormack@docker.com>
+Justin Garrison <justin@linux.com>
+Jörg Franke <359489+NewJorg@users.noreply.github.com>
+Kang, Matthew <impulsecss@gmail.com>
+Kees Cook <keescook@chromium.org>
+Kevin Burke <kev@inburke.com>
+kevinmeredith <kevin.m.meredith@gmail.com>
+Kir Kolyshkin <kolyshkin@gmail.com>
+Kohei Tokunaga <ktokunaga.mail@gmail.com>
+Koichi Shiraishi <zchee.io@gmail.com>
+Kris-Mikael Krister <krismikael@protonmail.com>
 Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
+Kyle <Kylemit@gmail.com>
+l00397676 <lujingxiao@huawei.com>
 Lajos Papp <lalyos@yahoo.com>
+lalyos <lalyos@yahoo.com>
+Levi Harrison <levisamuelharrison@gmail.com>
+liwenqi <vikilwq@zju.edu.cn>
+lixiaobing10051267 <li.xiaobing1@zte.com.cn>
+lomot <lomot@qq.com>
+Lu Jingxiao <lujingxiao@huawei.com>
+Luca Visentin <luck.visentin@gmail.com>
+Maciej Kalisz <mdkalish@users.noreply.github.com>
+Madhav Puri <madhav.puri@gmail.com>
+Manu Gupta <manugupt1@gmail.com>
+Marcus Comstedt <marcus@mc.pp.se>
+Mark Gordon <msg555@gmail.com>
+Marko Kohtala <marko.kohtala@gmail.com>
+Mary Anthony <mary@docker.com>
+masibw <masi19bw@gmail.com>
+Matias Insaurralde <matias@insaurral.de>
+Matt Kang <impulsecss@gmail.com>
 Matt Rickard <mrick@google.com>
+Maxime Lagresle <maxime@angel.co>
 Michael Crosby <crosbymichael@gmail.com>
+Michael Friis <friism@gmail.com>
+Michael Irwin <mikesir87@gmail.com>
+Miguel Ángel Jimeno <miguelangel4b@gmail.com>
+Mihai Borobocea <MihaiBorob@gmail.com>
+Mike Brown <brownwm@us.ibm.com>
+mikelinjie <294893458@qq.com>
+Mikhail Vasin <vasin@cloud-tv.ru>
+Misty Stanley-Jones <misty@docker.com>
 Miyachi Katsuya <miyachi_katsuya@r.recruit.co.jp>
+Morgan Bauer <mbauer@us.ibm.com>
+Morlay <morlay.null@gmail.com>
+msg <msg@clinc.com>
 Nao YONASHIRO <yonashiro@r.recruit.co.jp>
 Natasha Jarus <linuxmercedes@gmail.com>
+Nathan Sullivan <nathan@nightsys.net>
+Nick Miyake <nmiyake@users.noreply.github.com>
+Nick Santos <nick.santos@docker.com>
+Nikhil Pandeti <nikhil.pandeti@utexas.edu>
 Noel Georgi <18496730+frezbo@users.noreply.github.com>
+Oliver Bristow <oliver.bristow@project-tracr.com>
+Omer Duchovne <79370724+od-cyera@users.noreply.github.com>
+Omer Mizrahi <ommizrah@microsoft.com>
 Ondrej Fabry <ofabry@cisco.com>
+Otto Kekäläinen <otto@seravo.fi>
+Pablo Chico de Guzman <pchico83@gmail.com>
+Patrick Hemmer <patrick.hemmer@gmail.com>
+Patrick Lang <plang@microsoft.com>
 Patrick Van Stee <patrick@vanstee.me>
+Paul "TBBle" Hampson <Paul.Hampson@Pobox.com>
+Paweł Gronowski <pawel.gronowski@docker.com>
+Peter Dave Hello <hsu@peterdavehello.org>
+Petr Fedchenkov <giggsoff@gmail.com>
+Phil Estes <estesp@gmail.com>
+Pierre Fenoll <pierrefenoll@gmail.com>
+pieterdd <pieterdd@users.noreply.github.com>
+Pranav Pandit <pranavp@microsoft.com>
+Pratik Raj <rajpratik71@gmail.com>
+Prayag Verma <prayag.verma@gmail.com>
+Qiang Huang <h.huangqiang@huawei.com>
+Remy Suen <remy.suen@gmail.com>
 Ri Xu <xuri.me@gmail.com>
+Rob Taylor <rob@shape.build>
+Robert Estelle <robertestelle@gmail.com>
+Rubens Figueiredo <r.figueiredo.52@gmail.com>
+Sam Whited <sam@samwhited.com>
+Sascha Schwarze <schwarzs@de.ibm.com>
+Sean P. Kane <spkane00@gmail.com>
 Sebastiaan van Stijn <github@gone.nl>
+Seiya Miyata <odradek38@gmail.com>
+Serhat Gülçiçek <serhat25@gmail.com>
+Sertac Ozercan <sozercan@gmail.com>
 Shev Yan <yandong_8212@163.com>
+Shijiang Wei <mountkin@gmail.com>
+Shingo Omura <everpeace@gmail.com>
+Shiwei Zhang <shizh@microsoft.com>
+Siebe Schaap <siebe@digibites.nl>
+Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com>
 Simon Ferquel <simon.ferquel@docker.com>
+Slava Semushin <semushin@redhat.com>
+Solomon Hykes <sh.github.6811@hykes.org>
+squeegels <1674195+squeegels@users.noreply.github.com>
+Stefan Scherer <stefan.scherer@docker.com>
 Stefan Weil <sw@weilnetz.de>
+StefanSchoof <Stefan.Schoof@direkt-gruppe.de>
+Stepan Blyshchak <stepanblischak@gmail.com>
+Steve Lohr <schdief.law@gmail.com>
+sunchunming <sunchunming1@jd.com>
+Sven Dowideit <SvenDowideit@home.org.au>
+Takuya Noguchi <takninnovationresearch@gmail.com>
 Thomas Leonard <thomas.leonard@docker.com>
+Thomas Riccardi <riccardi@systran.fr>
 Thomas Shaw <tomwillfixit@users.noreply.github.com>
+Tianon Gravi <admwiggin@gmail.com>
 Tibor Vass <tibor@docker.com>
 Tiffany Jernigan <tiffany.f.j@gmail.com>
+Tim Waugh <twaugh@redhat.com>
+Tim Wraight <tim.wraight@tangentlabs.co.uk>
 Tino Rusch <tino.rusch@gmail.com>
 Tobias Klauser <tklauser@distanz.ch>
 Tomas Tomecek <ttomecek@redhat.com>
+Tomasz Kopczynski <tomek@kopczynski.net.pl>
 Tomohiro Kusumoto <zabio1192@gmail.com>
+Troels Liebe Bentsen <tlb@nversion.dk>
 Tõnis Tiigi <tonistiigi@gmail.com>
+Valentin Lorentz <progval+git@progval.net>
+Vasek - Tom C <tom.chauveau@epitech.eu>
+Victor Vieux <victorvieux@gmail.com>
+Victoria Bialas <victoria.bialas@docker.com>
 Vincent Demeester <vincent.demeester@docker.com>
+Vlad A. Ionescu <vladaionescu@users.noreply.github.com>
+Vladislav Ivanov <vlad@ivanov.email>
+Wang Yumu <37442693@qq.com>
 Wei Fu <fuweid89@gmail.com>
+Wei Zhang <kweizh@gmail.com>
+wingkwong <wingkwong.code@gmail.com>
+Xiaofan Zhang <xiaofan.zhang@clinc.com>
+Ximo Guanter <ximo.guanter@gmail.com>
+Yamazaki Masashi <masi19bw@gmail.com>
+Yan Song <imeoer@linux.alibaba.com>
 Yong Tang <yong.tang.github@outlook.com>
 Yuichiro Kaneko <spiketeika@gmail.com>
+Yurii Rashkovskii <yrashk@gmail.com>
+Zach Badgett <zach.badgett@gmail.com>
+zhangwenlong <zhangwenlong8911@163.com>
 Ziv Tsarfati <digger18@gmail.com>
+岁丰 <genglu.gl@antfin.com>
+沈陵 <shenling.yyb@alibaba-inc.com>
 郑泽宇 <perhapszzy@sina.com>

+ 427 - 195
vendor/github.com/moby/buildkit/api/services/control/control.pb.go

@@ -367,21 +367,25 @@ func (m *UsageRecord) GetParents() []string {
 }
 
 type SolveRequest struct {
-	Ref                  string                                                   `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
-	Definition           *pb.Definition                                           `protobuf:"bytes,2,opt,name=Definition,proto3" json:"Definition,omitempty"`
-	Exporter             string                                                   `protobuf:"bytes,3,opt,name=Exporter,proto3" json:"Exporter,omitempty"`
-	ExporterAttrs        map[string]string                                        `protobuf:"bytes,4,rep,name=ExporterAttrs,proto3" json:"ExporterAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-	Session              string                                                   `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"`
-	Frontend             string                                                   `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"`
-	FrontendAttrs        map[string]string                                        `protobuf:"bytes,7,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-	Cache                CacheOptions                                             `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache"`
-	Entitlements         []github_com_moby_buildkit_util_entitlements.Entitlement `protobuf:"bytes,9,rep,name=Entitlements,proto3,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"`
-	FrontendInputs       map[string]*pb.Definition                                `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-	Internal             bool                                                     `protobuf:"varint,11,opt,name=Internal,proto3" json:"Internal,omitempty"`
-	SourcePolicy         *pb1.Policy                                              `protobuf:"bytes,12,opt,name=SourcePolicy,proto3" json:"SourcePolicy,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                                                 `json:"-"`
-	XXX_unrecognized     []byte                                                   `json:"-"`
-	XXX_sizecache        int32                                                    `json:"-"`
+	Ref        string         `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
+	Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition,proto3" json:"Definition,omitempty"`
+	// ExporterDeprecated and ExporterAttrsDeprecated are deprecated in favor
+	// of the new Exporters. If these fields are set, then they will be
+	// appended to the Exporters field if Exporters was not explicitly set.
+	ExporterDeprecated      string                                                   `protobuf:"bytes,3,opt,name=ExporterDeprecated,proto3" json:"ExporterDeprecated,omitempty"`
+	ExporterAttrsDeprecated map[string]string                                        `protobuf:"bytes,4,rep,name=ExporterAttrsDeprecated,proto3" json:"ExporterAttrsDeprecated,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Session                 string                                                   `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"`
+	Frontend                string                                                   `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"`
+	FrontendAttrs           map[string]string                                        `protobuf:"bytes,7,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Cache                   CacheOptions                                             `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache"`
+	Entitlements            []github_com_moby_buildkit_util_entitlements.Entitlement `protobuf:"bytes,9,rep,name=Entitlements,proto3,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"`
+	FrontendInputs          map[string]*pb.Definition                                `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Internal                bool                                                     `protobuf:"varint,11,opt,name=Internal,proto3" json:"Internal,omitempty"`
+	SourcePolicy            *pb1.Policy                                              `protobuf:"bytes,12,opt,name=SourcePolicy,proto3" json:"SourcePolicy,omitempty"`
+	Exporters               []*Exporter                                              `protobuf:"bytes,13,rep,name=Exporters,proto3" json:"Exporters,omitempty"`
+	XXX_NoUnkeyedLiteral    struct{}                                                 `json:"-"`
+	XXX_unrecognized        []byte                                                   `json:"-"`
+	XXX_sizecache           int32                                                    `json:"-"`
 }
 
 func (m *SolveRequest) Reset()         { *m = SolveRequest{} }
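The hunk above renames the single-exporter fields to ExporterDeprecated/ExporterAttrsDeprecated and introduces the repeated Exporters field (field 13). A hedged sketch of a client filling only the new field; the controlapi import alias, the ref string, and the exporter type/attrs are assumptions for illustration, not values from this diff:

	package main

	import (
		controlapi "github.com/moby/buildkit/api/services/control"
	)

	func newSolveRequest() *controlapi.SolveRequest {
		// Per the struct comment above, the deprecated pair is only appended to
		// Exporters when Exporters was not set, so new clients can skip it entirely.
		return &controlapi.SolveRequest{
			Ref: "example-build-ref", // hypothetical ref
			Exporters: []*controlapi.Exporter{{
				Type:  "image",                                                    // assumed exporter type
				Attrs: map[string]string{"name": "docker.io/library/example:1.0"}, // assumed attrs
			}},
		}
	}

	func main() { _ = newSolveRequest() }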
@@ -431,16 +435,16 @@ func (m *SolveRequest) GetDefinition() *pb.Definition {
 	return nil
 }
 
-func (m *SolveRequest) GetExporter() string {
+func (m *SolveRequest) GetExporterDeprecated() string {
 	if m != nil {
-		return m.Exporter
+		return m.ExporterDeprecated
 	}
 	return ""
 }
 
-func (m *SolveRequest) GetExporterAttrs() map[string]string {
+func (m *SolveRequest) GetExporterAttrsDeprecated() map[string]string {
 	if m != nil {
-		return m.ExporterAttrs
+		return m.ExporterAttrsDeprecated
 	}
 	return nil
 }
@@ -494,6 +498,13 @@ func (m *SolveRequest) GetSourcePolicy() *pb1.Policy {
 	return nil
 }
 
+func (m *SolveRequest) GetExporters() []*Exporter {
+	if m != nil {
+		return m.Exporters
+	}
+	return nil
+}
+
 type CacheOptions struct {
 	// ExportRefDeprecated is deprecated in favor of the new Exports since BuildKit v0.4.0.
 	// When ExportRefDeprecated is set, the solver appends
@@ -1832,11 +1843,12 @@ func (m *Descriptor) GetAnnotations() map[string]string {
 }
 
 type BuildResultInfo struct {
-	Result               *Descriptor   `protobuf:"bytes,1,opt,name=Result,proto3" json:"Result,omitempty"`
-	Attestations         []*Descriptor `protobuf:"bytes,2,rep,name=Attestations,proto3" json:"Attestations,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
-	XXX_unrecognized     []byte        `json:"-"`
-	XXX_sizecache        int32         `json:"-"`
+	ResultDeprecated     *Descriptor           `protobuf:"bytes,1,opt,name=ResultDeprecated,proto3" json:"ResultDeprecated,omitempty"`
+	Attestations         []*Descriptor         `protobuf:"bytes,2,rep,name=Attestations,proto3" json:"Attestations,omitempty"`
+	Results              map[int64]*Descriptor `protobuf:"bytes,3,rep,name=Results,proto3" json:"Results,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
 }
 
 func (m *BuildResultInfo) Reset()         { *m = BuildResultInfo{} }
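BuildResultInfo now carries a Results map next to the renamed ResultDeprecated descriptor. A sketch of a reader that prefers the new map and falls back to the legacy field; treating the int64 key as the exporter's index in the request is an assumption here, not something this hunk states:

	// firstResult is illustrative only; it assumes the controlapi alias used earlier.
	func firstResult(info *controlapi.BuildResultInfo) *controlapi.Descriptor {
		if results := info.GetResults(); len(results) > 0 {
			for _, desc := range results { // map order is unspecified; fine for a sketch
				return desc
			}
		}
		return info.GetResultDeprecated() // legacy single-descriptor field
	}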
@@ -1872,9 +1884,9 @@ func (m *BuildResultInfo) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_BuildResultInfo proto.InternalMessageInfo
 
-func (m *BuildResultInfo) GetResult() *Descriptor {
+func (m *BuildResultInfo) GetResultDeprecated() *Descriptor {
 	if m != nil {
-		return m.Result
+		return m.ResultDeprecated
 	}
 	return nil
 }
@@ -1886,8 +1898,18 @@ func (m *BuildResultInfo) GetAttestations() []*Descriptor {
 	return nil
 }
 
+func (m *BuildResultInfo) GetResults() map[int64]*Descriptor {
+	if m != nil {
+		return m.Results
+	}
+	return nil
+}
+
+// Exporter describes the output exporter
 type Exporter struct {
-	Type                 string            `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"`
+	// Type identifies the exporter
+	Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"`
+	// Attrs specifies exporter configuration
 	Attrs                map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
 	XXX_unrecognized     []byte            `json:"-"`
@@ -1948,7 +1970,7 @@ func init() {
 	proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse")
 	proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord")
 	proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest")
-	proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsDeprecatedEntry")
 	proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.FrontendAttrsEntry")
 	proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.SolveRequest.FrontendInputsEntry")
 	proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions")
@@ -1979,6 +2001,7 @@ func init() {
 	proto.RegisterType((*Descriptor)(nil), "moby.buildkit.v1.Descriptor")
 	proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Descriptor.AnnotationsEntry")
 	proto.RegisterType((*BuildResultInfo)(nil), "moby.buildkit.v1.BuildResultInfo")
+	proto.RegisterMapType((map[int64]*Descriptor)(nil), "moby.buildkit.v1.BuildResultInfo.ResultsEntry")
 	proto.RegisterType((*Exporter)(nil), "moby.buildkit.v1.Exporter")
 	proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Exporter.AttrsEntry")
 }
@@ -1986,149 +2009,152 @@ func init() {
 func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) }
 
 var fileDescriptor_0c5120591600887d = []byte{
-	// 2261 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcd, 0x6e, 0x1b, 0xc9,
-	0x11, 0xde, 0x21, 0x25, 0xfe, 0x14, 0x29, 0x59, 0x6a, 0x7b, 0x8d, 0xc9, 0xc4, 0x2b, 0xc9, 0xb3,
-	0x76, 0x22, 0x38, 0xf6, 0x50, 0xcb, 0xac, 0x63, 0xaf, 0x9c, 0x38, 0x16, 0x45, 0x66, 0x2d, 0xc7,
-	0x82, 0xb5, 0x2d, 0x79, 0x0d, 0x2c, 0xe0, 0x04, 0x23, 0xb2, 0x45, 0x0f, 0x34, 0x9c, 0x99, 0x74,
-	0x37, 0xb5, 0xe6, 0x3e, 0x40, 0x80, 0xcd, 0x21, 0xc8, 0x25, 0xc8, 0x25, 0xf7, 0x9c, 0x72, 0xce,
-	0x13, 0x04, 0xf0, 0x31, 0xe7, 0x3d, 0x38, 0x81, 0x1f, 0x20, 0xc8, 0x31, 0xb9, 0x05, 0xfd, 0x33,
-	0xe4, 0x90, 0x33, 0x94, 0x28, 0xdb, 0x27, 0x76, 0x75, 0xd7, 0x57, 0x53, 0x55, 0x5d, 0x5d, 0x5d,
-	0xd5, 0x84, 0x85, 0x76, 0x18, 0x70, 0x1a, 0xfa, 0x4e, 0x44, 0x43, 0x1e, 0xa2, 0xa5, 0x5e, 0x78,
-	0x38, 0x70, 0x0e, 0xfb, 0x9e, 0xdf, 0x39, 0xf6, 0xb8, 0x73, 0xf2, 0x89, 0x75, 0xab, 0xeb, 0xf1,
-	0x17, 0xfd, 0x43, 0xa7, 0x1d, 0xf6, 0x6a, 0xdd, 0xb0, 0x1b, 0xd6, 0x24, 0xe3, 0x61, 0xff, 0x48,
-	0x52, 0x92, 0x90, 0x23, 0x25, 0xc0, 0x5a, 0xed, 0x86, 0x61, 0xd7, 0x27, 0x23, 0x2e, 0xee, 0xf5,
-	0x08, 0xe3, 0x6e, 0x2f, 0xd2, 0x0c, 0x37, 0x13, 0xf2, 0xc4, 0xc7, 0x6a, 0xf1, 0xc7, 0x6a, 0x2c,
-	0xf4, 0x4f, 0x08, 0xad, 0x45, 0x87, 0xb5, 0x30, 0x62, 0x9a, 0xbb, 0x36, 0x95, 0xdb, 0x8d, 0xbc,
-	0x1a, 0x1f, 0x44, 0x84, 0xd5, 0xbe, 0x0e, 0xe9, 0x31, 0xa1, 0x1a, 0x50, 0x9f, 0x54, 0x57, 0xe9,
-	0xe3, 0x46, 0x1e, 0xd3, 0xc3, 0x1a, 0x8d, 0xda, 0x35, 0xc6, 0x5d, 0xde, 0x8f, 0x3f, 0x72, 0xfb,
-	0x14, 0x95, 0xfa, 0xb4, 0x4d, 0xa2, 0xd0, 0xf7, 0xda, 0x03, 0xa1, 0x98, 0x1a, 0x29, 0x98, 0xfd,
-	0x5b, 0x03, 0xaa, 0x7b, 0xb4, 0x1f, 0x10, 0x4c, 0x7e, 0xd3, 0x27, 0x8c, 0xa3, 0xcb, 0x50, 0x38,
-	0xf2, 0x7c, 0x4e, 0xa8, 0x69, 0xac, 0xe5, 0xd7, 0xcb, 0x58, 0x53, 0x68, 0x09, 0xf2, 0xae, 0xef,
-	0x9b, 0xb9, 0x35, 0x63, 0xbd, 0x84, 0xc5, 0x10, 0xad, 0x43, 0xf5, 0x98, 0x90, 0xa8, 0xd9, 0xa7,
-	0x2e, 0xf7, 0xc2, 0xc0, 0xcc, 0xaf, 0x19, 0xeb, 0xf9, 0xc6, 0xdc, 0xab, 0xd7, 0xab, 0x06, 0x1e,
-	0x5b, 0x41, 0x36, 0x94, 0x05, 0xdd, 0x18, 0x70, 0xc2, 0xcc, 0xb9, 0x04, 0xdb, 0x68, 0xda, 0xbe,
-	0x01, 0x4b, 0x4d, 0x8f, 0x1d, 0x3f, 0x65, 0x6e, 0xf7, 0x2c, 0x5d, 0xec, 0x47, 0xb0, 0x9c, 0xe0,
-	0x65, 0x51, 0x18, 0x30, 0x82, 0x6e, 0x43, 0x81, 0x92, 0x76, 0x48, 0x3b, 0x92, 0xb9, 0x52, 0xff,
-	0xc8, 0x99, 0x0c, 0x03, 0x47, 0x03, 0x04, 0x13, 0xd6, 0xcc, 0xf6, 0x9f, 0xf2, 0x50, 0x49, 0xcc,
-	0xa3, 0x45, 0xc8, 0xed, 0x34, 0x4d, 0x63, 0xcd, 0x58, 0x2f, 0xe3, 0xdc, 0x4e, 0x13, 0x99, 0x50,
-	0xdc, 0xed, 0x73, 0xf7, 0xd0, 0x27, 0xda, 0xf6, 0x98, 0x44, 0x97, 0x60, 0x7e, 0x27, 0x78, 0xca,
-	0x88, 0x34, 0xbc, 0x84, 0x15, 0x81, 0x10, 0xcc, 0xed, 0x7b, 0xdf, 0x10, 0x65, 0x26, 0x96, 0x63,
-	0x64, 0x41, 0x61, 0xcf, 0xa5, 0x24, 0xe0, 0xe6, 0xbc, 0x90, 0xdb, 0xc8, 0x99, 0x06, 0xd6, 0x33,
-	0xa8, 0x01, 0xe5, 0x6d, 0x4a, 0x5c, 0x4e, 0x3a, 0x5b, 0xdc, 0x2c, 0xac, 0x19, 0xeb, 0x95, 0xba,
-	0xe5, 0xa8, 0x4d, 0x76, 0xe2, 0xf8, 0x73, 0x0e, 0xe2, 0xf8, 0x6b, 0x94, 0x5e, 0xbd, 0x5e, 0xfd,
-	0xe0, 0x0f, 0xff, 0x14, 0xbe, 0x1b, 0xc2, 0xd0, 0x03, 0x80, 0xc7, 0x2e, 0xe3, 0x4f, 0x99, 0x14,
-	0x52, 0x3c, 0x53, 0xc8, 0x9c, 0x14, 0x90, 0xc0, 0xa0, 0x15, 0x00, 0xe9, 0x84, 0xed, 0xb0, 0x1f,
-	0x70, 0xb3, 0x24, 0x75, 0x4f, 0xcc, 0xa0, 0x35, 0xa8, 0x34, 0x09, 0x6b, 0x53, 0x2f, 0x92, 0x5b,
-	0x5d, 0x96, 0xee, 0x49, 0x4e, 0x09, 0x09, 0xca, 0x83, 0x07, 0x83, 0x88, 0x98, 0x20, 0x19, 0x12,
-	0x33, 0x62, 0x2f, 0xf7, 0x5f, 0xb8, 0x94, 0x74, 0xcc, 0x8a, 0x74, 0x97, 0xa6, 0x84, 0x7f, 0x95,
-	0x27, 0x98, 0x59, 0x95, 0x9b, 0x1c, 0x93, 0xf6, 0xef, 0x8a, 0x50, 0xdd, 0x17, 0xc7, 0x29, 0x0e,
-	0x87, 0x25, 0xc8, 0x63, 0x72, 0xa4, 0xf7, 0x46, 0x0c, 0x91, 0x03, 0xd0, 0x24, 0x47, 0x5e, 0xe0,
-	0x49, 0xad, 0x72, 0xd2, 0xf0, 0x45, 0x27, 0x3a, 0x74, 0x46, 0xb3, 0x38, 0xc1, 0x81, 0x2c, 0x28,
-	0xb5, 0x5e, 0x46, 0x21, 0x15, 0x21, 0x95, 0x97, 0x62, 0x86, 0x34, 0x7a, 0x06, 0x0b, 0xf1, 0x78,
-	0x8b, 0x73, 0x2a, 0x02, 0x55, 0x84, 0xd1, 0x27, 0xe9, 0x30, 0x4a, 0x2a, 0xe5, 0x8c, 0x61, 0x5a,
-	0x01, 0xa7, 0x03, 0x3c, 0x2e, 0x47, 0x58, 0xb8, 0x4f, 0x18, 0x13, 0x1a, 0xca, 0xed, 0xc7, 0x31,
-	0x29, 0xd4, 0xf9, 0x05, 0x0d, 0x03, 0x4e, 0x82, 0x8e, 0xdc, 0xfa, 0x32, 0x1e, 0xd2, 0x42, 0x9d,
-	0x78, 0xac, 0xd4, 0x29, 0xce, 0xa4, 0xce, 0x18, 0x46, 0xab, 0x33, 0x36, 0x87, 0x36, 0x61, 0x7e,
-	0xdb, 0x6d, 0xbf, 0x20, 0x72, 0x97, 0x2b, 0xf5, 0x95, 0xb4, 0x40, 0xb9, 0xfc, 0x44, 0x6e, 0x2b,
-	0x93, 0x07, 0xf5, 0x03, 0xac, 0x20, 0xe8, 0x57, 0x50, 0x6d, 0x05, 0xdc, 0xe3, 0x3e, 0xe9, 0xc9,
-	0x1d, 0x2b, 0x8b, 0x1d, 0x6b, 0x6c, 0x7e, 0xf7, 0x7a, 0xf5, 0x27, 0x53, 0xd3, 0x4f, 0x9f, 0x7b,
-	0x7e, 0x8d, 0x24, 0x50, 0x4e, 0x42, 0x04, 0x1e, 0x93, 0x87, 0xbe, 0x82, 0xc5, 0x58, 0xd9, 0x9d,
-	0x20, 0xea, 0x73, 0x66, 0x82, 0xb4, 0xba, 0x3e, 0xa3, 0xd5, 0x0a, 0xa4, 0xcc, 0x9e, 0x90, 0x24,
-	0x9c, 0xbd, 0x13, 0x70, 0x42, 0x03, 0xd7, 0xd7, 0x21, 0x38, 0xa4, 0xd1, 0x8e, 0x88, 0x34, 0x91,
-	0x25, 0xf7, 0x64, 0x6e, 0x34, 0xab, 0xd2, 0x35, 0xd7, 0xd3, 0x5f, 0x4d, 0xe6, 0x52, 0x47, 0x31,
-	0xe3, 0x31, 0xa8, 0xf5, 0x00, 0x50, 0x3a, 0x24, 0x44, 0xe8, 0x1e, 0x93, 0x41, 0x1c, 0xba, 0xc7,
-	0x64, 0x20, 0xb2, 0xc7, 0x89, 0xeb, 0xf7, 0x55, 0x56, 0x29, 0x63, 0x45, 0x6c, 0xe6, 0xee, 0x1a,
-	0x42, 0x42, 0x7a, 0x17, 0xcf, 0x25, 0xe1, 0x0b, 0xb8, 0x98, 0xe1, 0x91, 0x0c, 0x11, 0xd7, 0x92,
-	0x22, 0xd2, 0x47, 0x67, 0x24, 0xd2, 0xfe, 0x6b, 0x1e, 0xaa, 0xc9, 0xb8, 0x40, 0x1b, 0x70, 0x51,
-	0xd9, 0x89, 0xc9, 0x51, 0x93, 0x44, 0x94, 0xb4, 0x45, 0x32, 0xd2, 0xc2, 0xb3, 0x96, 0x50, 0x1d,
-	0x2e, 0xed, 0xf4, 0xf4, 0x34, 0x4b, 0x40, 0x72, 0xf2, 0xd8, 0x67, 0xae, 0xa1, 0x10, 0x3e, 0x54,
-	0xa2, 0xa4, 0x27, 0x12, 0xa0, 0xbc, 0x8c, 0x8b, 0xcf, 0x4e, 0x0f, 0x5e, 0x27, 0x13, 0xab, 0xc2,
-	0x23, 0x5b, 0x2e, 0xfa, 0x19, 0x14, 0xd5, 0x42, 0x7c, 0xfe, 0x3f, 0x3e, 0xfd, 0x13, 0x4a, 0x58,
-	0x8c, 0x11, 0x70, 0x65, 0x07, 0x33, 0xe7, 0xcf, 0x01, 0xd7, 0x18, 0xeb, 0x21, 0x58, 0xd3, 0x55,
-	0x3e, 0x4f, 0x08, 0xd8, 0x7f, 0x31, 0x60, 0x39, 0xf5, 0x21, 0x71, 0x39, 0xc9, 0xf4, 0xac, 0x44,
-	0xc8, 0x31, 0x6a, 0xc2, 0xbc, 0x4a, 0x30, 0x39, 0xa9, 0xb0, 0x33, 0x83, 0xc2, 0x4e, 0x22, 0xbb,
-	0x28, 0xb0, 0x75, 0x17, 0xe0, 0xed, 0x82, 0xd5, 0xfe, 0x9b, 0x01, 0x0b, 0xfa, 0x30, 0xeb, 0x9b,
-	0xdc, 0x85, 0xa5, 0xf8, 0x08, 0xc5, 0x73, 0xfa, 0x4e, 0xbf, 0x3d, 0x35, 0x0f, 0x28, 0x36, 0x67,
-	0x12, 0xa7, 0x74, 0x4c, 0x89, 0xb3, 0xb6, 0xe3, 0xb8, 0x9a, 0x60, 0x3d, 0x97, 0xe6, 0x57, 0x61,
-	0x61, 0x5f, 0x96, 0x60, 0x53, 0x2f, 0x28, 0xfb, 0x3f, 0x06, 0x2c, 0xc6, 0x3c, 0xda, 0xba, 0x4f,
-	0xa1, 0x74, 0x42, 0x28, 0x27, 0x2f, 0x09, 0xd3, 0x56, 0x99, 0x69, 0xab, 0xbe, 0x94, 0x1c, 0x78,
-	0xc8, 0x89, 0x36, 0xa1, 0xa4, 0xca, 0x3d, 0x12, 0x6f, 0xd4, 0xca, 0x34, 0x94, 0xfe, 0xde, 0x90,
-	0x1f, 0xd5, 0x60, 0xce, 0x0f, 0xbb, 0x4c, 0x9f, 0x99, 0xef, 0x4f, 0xc3, 0x3d, 0x0e, 0xbb, 0x58,
-	0x32, 0xa2, 0x7b, 0x50, 0xfa, 0xda, 0xa5, 0x81, 0x17, 0x74, 0xe3, 0x53, 0xb0, 0x3a, 0x0d, 0xf4,
-	0x4c, 0xf1, 0xe1, 0x21, 0x40, 0x14, 0x54, 0x05, 0xb5, 0x86, 0x1e, 0x41, 0xa1, 0xe3, 0x75, 0x09,
-	0xe3, 0xca, 0x25, 0x8d, 0xba, 0xb8, 0x4b, 0xbe, 0x7b, 0xbd, 0x7a, 0x23, 0x71, 0x59, 0x84, 0x11,
-	0x09, 0x44, 0xf9, 0xee, 0x7a, 0x01, 0xa1, 0xa2, 0xbc, 0xbd, 0xa5, 0x20, 0x4e, 0x53, 0xfe, 0x60,
-	0x2d, 0x41, 0xc8, 0xf2, 0xd4, 0x95, 0x20, 0xf3, 0xc5, 0xdb, 0xc9, 0x52, 0x12, 0xc4, 0x31, 0x08,
-	0xdc, 0x1e, 0xd1, 0x25, 0x80, 0x1c, 0x8b, 0xfa, 0xa4, 0x2d, 0xe2, 0xbc, 0x23, 0x2b, 0xb7, 0x12,
-	0xd6, 0x14, 0xda, 0x84, 0x22, 0xe3, 0x2e, 0x15, 0x39, 0x67, 0x7e, 0xc6, 0xc2, 0x2a, 0x06, 0xa0,
-	0xfb, 0x50, 0x6e, 0x87, 0xbd, 0xc8, 0x27, 0x02, 0x5d, 0x98, 0x11, 0x3d, 0x82, 0x88, 0xd0, 0x23,
-	0x94, 0x86, 0x54, 0x96, 0x74, 0x65, 0xac, 0x08, 0x74, 0x07, 0x16, 0x22, 0x1a, 0x76, 0x29, 0x61,
-	0xec, 0x73, 0x1a, 0xf6, 0x23, 0x7d, 0x91, 0x2f, 0x8b, 0xe4, 0xbd, 0x97, 0x5c, 0xc0, 0xe3, 0x7c,
-	0xf6, 0xbf, 0x73, 0x50, 0x4d, 0x86, 0x48, 0xaa, 0xd6, 0x7d, 0x04, 0x05, 0x15, 0x70, 0x2a, 0xd6,
-	0xdf, 0xce, 0xc7, 0x4a, 0x42, 0xa6, 0x8f, 0x4d, 0x28, 0xb6, 0xfb, 0x54, 0x16, 0xc2, 0xaa, 0x3c,
-	0x8e, 0x49, 0x61, 0x29, 0x0f, 0xb9, 0xeb, 0x4b, 0x1f, 0xe7, 0xb1, 0x22, 0x44, 0x6d, 0x3c, 0xec,
-	0xbc, 0xce, 0x57, 0x1b, 0x0f, 0x61, 0xc9, 0xfd, 0x2b, 0xbe, 0xd3, 0xfe, 0x95, 0xce, 0xbd, 0x7f,
-	0xf6, 0xdf, 0x0d, 0x28, 0x0f, 0xcf, 0x56, 0xc2, 0xbb, 0xc6, 0x3b, 0x7b, 0x77, 0xcc, 0x33, 0xb9,
-	0xb7, 0xf3, 0xcc, 0x65, 0x28, 0x30, 0x4e, 0x89, 0xdb, 0x53, 0x9d, 0x1b, 0xd6, 0x94, 0xc8, 0x62,
-	0x3d, 0xd6, 0x95, 0x3b, 0x54, 0xc5, 0x62, 0x68, 0xff, 0xd7, 0x80, 0x85, 0xb1, 0xe3, 0xfe, 0x5e,
-	0x6d, 0xb9, 0x04, 0xf3, 0x3e, 0x39, 0x21, 0xaa, 0xb7, 0xcc, 0x63, 0x45, 0x88, 0x59, 0xf6, 0x22,
-	0xa4, 0x5c, 0x2a, 0x57, 0xc5, 0x8a, 0x10, 0x3a, 0x77, 0x08, 0x77, 0x3d, 0x5f, 0xe6, 0xa5, 0x2a,
-	0xd6, 0x94, 0xd0, 0xb9, 0x4f, 0x7d, 0x5d, 0x5f, 0x8b, 0x21, 0xb2, 0x61, 0xce, 0x0b, 0x8e, 0x42,
-	0x1d, 0x36, 0xb2, 0xb2, 0x51, 0x75, 0xda, 0x4e, 0x70, 0x14, 0x62, 0xb9, 0x86, 0xae, 0x42, 0x81,
-	0xba, 0x41, 0x97, 0xc4, 0xc5, 0x75, 0x59, 0x70, 0x61, 0x31, 0x83, 0xf5, 0x82, 0x6d, 0x43, 0x55,
-	0xf6, 0xa7, 0xbb, 0x84, 0x89, 0x6e, 0x48, 0x84, 0x75, 0xc7, 0xe5, 0xae, 0x34, 0xbb, 0x8a, 0xe5,
-	0xd8, 0xbe, 0x09, 0xe8, 0xb1, 0xc7, 0xf8, 0x33, 0xd9, 0xc2, 0xb3, 0xb3, 0x9a, 0xd7, 0x7d, 0xb8,
-	0x38, 0xc6, 0xad, 0xaf, 0x85, 0x9f, 0x4e, 0xb4, 0xaf, 0xd7, 0xd2, 0x19, 0x57, 0xbe, 0x14, 0x38,
-	0x0a, 0x38, 0xd1, 0xc5, 0x2e, 0x40, 0x45, 0xda, 0xa5, 0xbe, 0x6d, 0xbb, 0x50, 0x55, 0xa4, 0x16,
-	0xfe, 0x05, 0x5c, 0x88, 0x05, 0x7d, 0x49, 0xa8, 0x6c, 0x45, 0x0c, 0xe9, 0x97, 0x1f, 0x4e, 0xfb,
-	0x4a, 0x63, 0x9c, 0x1d, 0x4f, 0xe2, 0x6d, 0x02, 0x17, 0x25, 0xcf, 0x43, 0x8f, 0xf1, 0x90, 0x0e,
-	0x62, 0xab, 0x57, 0x00, 0xb6, 0xda, 0xdc, 0x3b, 0x21, 0x4f, 0x02, 0x5f, 0x5d, 0xa3, 0x25, 0x9c,
-	0x98, 0x89, 0xaf, 0xc8, 0xdc, 0xa8, 0x87, 0xbb, 0x02, 0xe5, 0x96, 0x4b, 0xfd, 0x41, 0xeb, 0xa5,
-	0xc7, 0x75, 0x2b, 0x3d, 0x9a, 0xb0, 0x7f, 0x6f, 0xc0, 0x72, 0xf2, 0x3b, 0xad, 0x13, 0x91, 0x2e,
-	0xee, 0xc1, 0x1c, 0x8f, 0xeb, 0x98, 0xc5, 0x2c, 0x23, 0x52, 0x10, 0x51, 0xea, 0x60, 0x09, 0x4a,
-	0x78, 0x5a, 0x1d, 0x9c, 0x6b, 0xa7, 0xc3, 0x27, 0x3c, 0xfd, 0xbf, 0x12, 0xa0, 0xf4, 0x72, 0x46,
-	0x6f, 0x9a, 0x6c, 0xee, 0x72, 0x13, 0xcd, 0xdd, 0xf3, 0xc9, 0xe6, 0x4e, 0x5d, 0xcd, 0x77, 0x66,
-	0xd1, 0x64, 0x86, 0x16, 0xef, 0x2e, 0x94, 0xe3, 0xea, 0x26, 0xbe, 0xc0, 0xad, 0xb4, 0xe8, 0x61,
-	0x01, 0x34, 0x62, 0x46, 0xeb, 0xf1, 0x8d, 0xa3, 0xee, 0x3a, 0x14, 0xe7, 0x14, 0x1a, 0xb5, 0x1d,
-	0x5d, 0x57, 0xe8, 0x5b, 0xe8, 0xfe, 0xf9, 0xde, 0x2d, 0xe6, 0x26, 0xdf, 0x2c, 0x1a, 0x50, 0xd9,
-	0x8e, 0x13, 0xe5, 0x39, 0x1e, 0x2d, 0x92, 0x20, 0xb4, 0xa1, 0x0b, 0x1b, 0x95, 0x9a, 0xaf, 0xa4,
-	0x4d, 0x8c, 0x1f, 0x28, 0x42, 0xaa, 0x2b, 0x9b, 0xa3, 0x8c, 0xd2, 0xb2, 0x2c, 0x1d, 0xb4, 0x39,
-	0x93, 0xef, 0x67, 0xac, 0x2f, 0xd1, 0x67, 0x50, 0xc0, 0x84, 0xf5, 0x7d, 0x2e, 0x5f, 0x42, 0x2a,
-	0xf5, 0xab, 0x53, 0xa4, 0x2b, 0x26, 0x79, 0x56, 0x35, 0x00, 0xfd, 0x12, 0x8a, 0x6a, 0xc4, 0xcc,
-	0xca, 0xb4, 0x96, 0x3f, 0x43, 0x33, 0x8d, 0xd1, 0x0d, 0x85, 0xa6, 0xc4, 0x71, 0xfc, 0x9c, 0x04,
-	0x44, 0xbf, 0xd0, 0x89, 0xb6, 0x76, 0x1e, 0x27, 0x66, 0x50, 0x1d, 0xe6, 0x39, 0x75, 0xdb, 0xc4,
-	0x5c, 0x98, 0xc1, 0x85, 0x8a, 0x55, 0x24, 0xb6, 0xc8, 0x0b, 0x02, 0xd2, 0x31, 0x17, 0x55, 0xa5,
-	0xa4, 0x28, 0xf4, 0x03, 0x58, 0x0c, 0xfa, 0x3d, 0xd9, 0x2c, 0x74, 0xf6, 0x39, 0x89, 0x98, 0x79,
-	0x41, 0x7e, 0x6f, 0x62, 0x16, 0x5d, 0x83, 0x85, 0xa0, 0xdf, 0x3b, 0x10, 0x37, 0xbc, 0x62, 0x5b,
-	0x92, 0x6c, 0xe3, 0x93, 0xe8, 0x26, 0x2c, 0x0b, 0x5c, 0xbc, 0xdb, 0x8a, 0x73, 0x59, 0x72, 0xa6,
-	0x17, 0xde, 0x43, 0xcf, 0xfc, 0x3e, 0x3a, 0x02, 0xeb, 0x39, 0x54, 0x93, 0xfb, 0x90, 0x81, 0xbd,
-	0x33, 0xde, 0x71, 0xcf, 0x10, 0x17, 0x89, 0x86, 0xe3, 0x39, 0x7c, 0xef, 0x69, 0xd4, 0x71, 0x39,
-	0xc9, 0xca, 0xbc, 0xe9, 0x0c, 0x74, 0x19, 0x0a, 0x7b, 0x6a, 0xa3, 0xd4, 0xcb, 0xa5, 0xa6, 0xc4,
-	0x7c, 0x93, 0x08, 0xe7, 0xe9, 0x74, 0xab, 0x29, 0xfb, 0x0a, 0x58, 0x59, 0xe2, 0x95, 0x33, 0xec,
-	0x3f, 0xe7, 0x00, 0x46, 0xc1, 0x80, 0x3e, 0x02, 0xe8, 0x91, 0x8e, 0xe7, 0xfe, 0x9a, 0x8f, 0x1a,
-	0xca, 0xb2, 0x9c, 0x91, 0x5d, 0xe5, 0xa8, 0xf4, 0xcf, 0xbd, 0x73, 0xe9, 0x8f, 0x60, 0x8e, 0x79,
-	0xdf, 0x10, 0x5d, 0xa6, 0xc8, 0x31, 0x7a, 0x02, 0x15, 0x37, 0x08, 0x42, 0x2e, 0xc3, 0x38, 0x6e,
-	0xb6, 0x6f, 0x9d, 0x16, 0xbe, 0xce, 0xd6, 0x88, 0x5f, 0x9d, 0x92, 0xa4, 0x04, 0xeb, 0x3e, 0x2c,
-	0x4d, 0x32, 0x9c, 0xab, 0x19, 0xfc, 0xd6, 0x80, 0x0b, 0x13, 0x5b, 0x87, 0x3e, 0x1d, 0x66, 0x01,
-	0x63, 0x86, 0xe3, 0x15, 0x27, 0x80, 0x07, 0x50, 0xdd, 0xe2, 0x5c, 0x64, 0x3d, 0x65, 0x9b, 0x6a,
-	0xf7, 0x4e, 0xc7, 0x8e, 0x21, 0xec, 0x3f, 0x1a, 0xa3, 0x77, 0xce, 0xcc, 0x9e, 0xff, 0xde, 0x78,
-	0xcf, 0x7f, 0x7d, 0xfa, 0xe5, 0xf0, 0x3e, 0x5b, 0xfd, 0x1b, 0x3f, 0x87, 0x0f, 0x33, 0x2f, 0x66,
-	0x54, 0x81, 0xe2, 0xfe, 0xc1, 0x16, 0x3e, 0x68, 0x35, 0x97, 0x3e, 0x40, 0x55, 0x28, 0x6d, 0x3f,
-	0xd9, 0xdd, 0x7b, 0xdc, 0x3a, 0x68, 0x2d, 0x19, 0x62, 0xa9, 0xd9, 0x12, 0xe3, 0xe6, 0x52, 0xae,
-	0xfe, 0x6d, 0x01, 0x8a, 0xdb, 0xea, 0xbf, 0x1e, 0x74, 0x00, 0xe5, 0xe1, 0x9f, 0x00, 0xc8, 0xce,
-	0xf0, 0xce, 0xc4, 0xbf, 0x09, 0xd6, 0xc7, 0xa7, 0xf2, 0xe8, 0xc4, 0xfd, 0x10, 0xe6, 0xe5, 0xdf,
-	0x21, 0x28, 0xa3, 0xbd, 0x4e, 0xfe, 0x4f, 0x62, 0x9d, 0xfe, 0xf7, 0xc2, 0x86, 0x21, 0x24, 0xc9,
-	0xb7, 0x89, 0x2c, 0x49, 0xc9, 0xc7, 0x4b, 0x6b, 0xf5, 0x8c, 0x47, 0x0d, 0xb4, 0x0b, 0x05, 0xdd,
-	0xb0, 0x65, 0xb1, 0x26, 0x5f, 0x20, 0xac, 0xb5, 0xe9, 0x0c, 0x4a, 0xd8, 0x86, 0x81, 0x76, 0x87,
-	0xef, 0xd1, 0x59, 0xaa, 0x25, 0xab, 0x5d, 0xeb, 0x8c, 0xf5, 0x75, 0x63, 0xc3, 0x40, 0x5f, 0x41,
-	0x25, 0x51, 0xcf, 0xa2, 0x8c, 0x6a, 0x2a, 0x5d, 0x1c, 0x5b, 0xd7, 0xcf, 0xe0, 0xd2, 0x96, 0xb7,
-	0x60, 0x4e, 0x1e, 0xa4, 0x0c, 0x67, 0x27, 0xca, 0xdd, 0x2c, 0x35, 0xc7, 0xca, 0xdf, 0x43, 0x55,
-	0xa0, 0x93, 0x20, 0x19, 0x7d, 0xe8, 0xfa, 0x59, 0xf7, 0xea, 0xd4, 0xb0, 0x49, 0x05, 0xf1, 0x86,
-	0x81, 0x42, 0x40, 0xe9, 0xe4, 0x89, 0x7e, 0x94, 0x11, 0x25, 0xd3, 0x32, 0xb8, 0x75, 0x73, 0x36,
-	0x66, 0x65, 0x54, 0xa3, 0xfa, 0xea, 0xcd, 0x8a, 0xf1, 0x8f, 0x37, 0x2b, 0xc6, 0xbf, 0xde, 0xac,
-	0x18, 0x87, 0x05, 0x59, 0x31, 0xfd, 0xf8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7c, 0xb8, 0xc3,
-	0x68, 0x0b, 0x1d, 0x00, 0x00,
+	// 2315 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0x4f, 0x73, 0x1b, 0x49,
+	0x15, 0xcf, 0x48, 0xb2, 0x2c, 0x3d, 0xc9, 0x8e, 0xdc, 0xc9, 0x86, 0x61, 0xc8, 0xda, 0xce, 0x6c,
+	0x02, 0xae, 0x90, 0x8c, 0xbc, 0x82, 0x90, 0xac, 0x03, 0x21, 0xb6, 0x25, 0x36, 0x0e, 0x49, 0xc5,
+	0xdb, 0x76, 0x36, 0xd4, 0x56, 0x05, 0x6a, 0x2c, 0xb5, 0x95, 0x29, 0x8f, 0x66, 0x86, 0xee, 0x96,
+	0x37, 0xde, 0x13, 0x27, 0xaa, 0xb8, 0x50, 0x5c, 0x28, 0x2e, 0xdc, 0x39, 0x71, 0xe6, 0xcc, 0x81,
+	0xaa, 0x1c, 0x39, 0xef, 0x21, 0x50, 0xf9, 0x00, 0x14, 0x47, 0xb8, 0x6d, 0xf5, 0x9f, 0x91, 0x46,
+	0x9a, 0x91, 0x2d, 0x25, 0x39, 0xa9, 0x5f, 0xf7, 0xfb, 0xbd, 0x79, 0xef, 0xf5, 0xeb, 0xd7, 0xef,
+	0xb5, 0x60, 0xa1, 0x1d, 0x06, 0x9c, 0x86, 0xbe, 0x13, 0xd1, 0x90, 0x87, 0xa8, 0xd6, 0x0b, 0x0f,
+	0x4e, 0x9c, 0x83, 0xbe, 0xe7, 0x77, 0x8e, 0x3c, 0xee, 0x1c, 0x7f, 0x6c, 0x35, 0xba, 0x1e, 0x7f,
+	0xd1, 0x3f, 0x70, 0xda, 0x61, 0xaf, 0xde, 0x0d, 0xbb, 0x61, 0xbd, 0x1b, 0x86, 0x5d, 0x9f, 0xb8,
+	0x91, 0xc7, 0xf4, 0xb0, 0x4e, 0xa3, 0x76, 0x9d, 0x71, 0x97, 0xf7, 0x99, 0x92, 0x62, 0xdd, 0x1c,
+	0xc7, 0xc8, 0xe9, 0x83, 0xfe, 0xa1, 0xa4, 0x24, 0x21, 0x47, 0x9a, 0xbd, 0x9e, 0x60, 0x17, 0xdf,
+	0xaf, 0xc7, 0xdf, 0xaf, 0xbb, 0x91, 0x57, 0xe7, 0x27, 0x11, 0x61, 0xf5, 0x2f, 0x43, 0x7a, 0x44,
+	0xa8, 0x06, 0xdc, 0x98, 0x08, 0x60, 0xa1, 0x7f, 0x4c, 0x68, 0x3d, 0x3a, 0xa8, 0x87, 0x51, 0xac,
+	0xcd, 0xad, 0x53, 0xb8, 0xfb, 0xb4, 0x4d, 0xa2, 0xd0, 0xf7, 0xda, 0x27, 0x02, 0xa3, 0x46, 0x1a,
+	0xb6, 0xa2, 0xad, 0x1b, 0xe8, 0xce, 0xbd, 0x1e, 0x61, 0xdc, 0xed, 0x45, 0x8a, 0xc1, 0xfe, 0xad,
+	0x01, 0xd5, 0x5d, 0xda, 0x0f, 0x08, 0x26, 0xbf, 0xee, 0x13, 0xc6, 0xd1, 0x25, 0x28, 0x1e, 0x7a,
+	0x3e, 0x27, 0xd4, 0x34, 0x56, 0xf3, 0x6b, 0x65, 0xac, 0x29, 0x54, 0x83, 0xbc, 0xeb, 0xfb, 0x66,
+	0x6e, 0xd5, 0x58, 0x2b, 0x61, 0x31, 0x44, 0x6b, 0x50, 0x3d, 0x22, 0x24, 0x6a, 0xf6, 0xa9, 0xcb,
+	0xbd, 0x30, 0x30, 0xf3, 0xab, 0xc6, 0x5a, 0x7e, 0xab, 0xf0, 0xea, 0xf5, 0x8a, 0x81, 0x47, 0x56,
+	0x90, 0x0d, 0x65, 0x41, 0x6f, 0x9d, 0x70, 0xc2, 0xcc, 0x42, 0x82, 0x6d, 0x38, 0x6d, 0x5f, 0x87,
+	0x5a, 0xd3, 0x63, 0x47, 0x4f, 0x99, 0xdb, 0x3d, 0x4b, 0x17, 0xfb, 0x21, 0x2c, 0x25, 0x78, 0x59,
+	0x14, 0x06, 0x8c, 0xa0, 0x5b, 0x50, 0xa4, 0xa4, 0x1d, 0xd2, 0x8e, 0x64, 0xae, 0x34, 0x3e, 0x74,
+	0xc6, 0xc3, 0xc0, 0xd1, 0x00, 0xc1, 0x84, 0x35, 0xb3, 0xfd, 0xa7, 0x3c, 0x54, 0x12, 0xf3, 0x68,
+	0x11, 0x72, 0x3b, 0x4d, 0xd3, 0x58, 0x35, 0xd6, 0xca, 0x38, 0xb7, 0xd3, 0x44, 0x26, 0xcc, 0x3f,
+	0xee, 0x73, 0xf7, 0xc0, 0x27, 0xda, 0xf6, 0x98, 0x44, 0x17, 0x61, 0x6e, 0x27, 0x78, 0xca, 0x88,
+	0x34, 0xbc, 0x84, 0x15, 0x81, 0x10, 0x14, 0xf6, 0xbc, 0xaf, 0x88, 0x32, 0x13, 0xcb, 0x31, 0xb2,
+	0xa0, 0xb8, 0xeb, 0x52, 0x12, 0x70, 0x73, 0x4e, 0xc8, 0xdd, 0xca, 0x99, 0x06, 0xd6, 0x33, 0x68,
+	0x0b, 0xca, 0xdb, 0x94, 0xb8, 0x9c, 0x74, 0x36, 0xb9, 0x59, 0x5c, 0x35, 0xd6, 0x2a, 0x0d, 0xcb,
+	0x51, 0xbb, 0xe6, 0xc4, 0xbb, 0xe6, 0xec, 0xc7, 0xbb, 0xb6, 0x55, 0x7a, 0xf5, 0x7a, 0xe5, 0xdc,
+	0x1f, 0xfe, 0x25, 0x7c, 0x37, 0x80, 0xa1, 0xfb, 0x00, 0x8f, 0x5c, 0xc6, 0x9f, 0x32, 0x29, 0x64,
+	0xfe, 0x4c, 0x21, 0x05, 0x29, 0x20, 0x81, 0x41, 0xcb, 0x00, 0xd2, 0x09, 0xdb, 0x61, 0x3f, 0xe0,
+	0x66, 0x49, 0xea, 0x9e, 0x98, 0x41, 0xab, 0x50, 0x69, 0x12, 0xd6, 0xa6, 0x5e, 0x24, 0xb7, 0xba,
+	0x2c, 0xdd, 0x93, 0x9c, 0x12, 0x12, 0x94, 0x07, 0xf7, 0x4f, 0x22, 0x62, 0x82, 0x64, 0x48, 0xcc,
+	0x88, 0xbd, 0xdc, 0x7b, 0xe1, 0x52, 0xd2, 0x31, 0x2b, 0xd2, 0x5d, 0x9a, 0x12, 0xfe, 0x55, 0x9e,
+	0x60, 0x66, 0x55, 0x6e, 0x72, 0x4c, 0xda, 0xbf, 0x29, 0x41, 0x75, 0x4f, 0x1c, 0x85, 0x38, 0x1c,
+	0x6a, 0x90, 0xc7, 0xe4, 0x50, 0xef, 0x8d, 0x18, 0x22, 0x07, 0xa0, 0x49, 0x0e, 0xbd, 0xc0, 0x93,
+	0x5a, 0xe5, 0xa4, 0xe1, 0x8b, 0x4e, 0x74, 0xe0, 0x0c, 0x67, 0x71, 0x82, 0x03, 0x39, 0x80, 0x5a,
+	0x2f, 0xa3, 0x90, 0x72, 0x42, 0x9b, 0x24, 0xa2, 0xa4, 0x2d, 0x1c, 0x28, 0xf7, 0xaf, 0x8c, 0x33,
+	0x56, 0x50, 0x1f, 0xbe, 0x15, 0xcf, 0x6e, 0x72, 0x4e, 0x59, 0x02, 0x54, 0x90, 0x41, 0x76, 0x37,
+	0x1d, 0x64, 0x49, 0x95, 0x9d, 0x09, 0xe8, 0x56, 0xc0, 0xe9, 0x09, 0x9e, 0x24, 0x5b, 0xf8, 0x64,
+	0x8f, 0x30, 0x26, 0x6c, 0x92, 0x01, 0x83, 0x63, 0x12, 0x59, 0x50, 0xfa, 0x19, 0x0d, 0x03, 0x4e,
+	0x82, 0x8e, 0x0c, 0x96, 0x32, 0x1e, 0xd0, 0xe8, 0x19, 0x2c, 0xc4, 0x63, 0x29, 0xd0, 0x9c, 0x97,
+	0x2a, 0x7e, 0x7c, 0x86, 0x8a, 0x23, 0x18, 0xa5, 0xd8, 0xa8, 0x1c, 0xb4, 0x01, 0x73, 0xdb, 0x6e,
+	0xfb, 0x05, 0x91, 0x71, 0x51, 0x69, 0x2c, 0xa7, 0x05, 0xca, 0xe5, 0x27, 0x32, 0x10, 0x98, 0x3c,
+	0xda, 0xe7, 0xb0, 0x82, 0xa0, 0x5f, 0x42, 0xb5, 0x15, 0x70, 0x8f, 0xfb, 0xa4, 0x27, 0xf7, 0xb8,
+	0x2c, 0xf6, 0x78, 0x6b, 0xe3, 0xeb, 0xd7, 0x2b, 0x3f, 0x9a, 0x98, 0xd1, 0xfa, 0xdc, 0xf3, 0xeb,
+	0x24, 0x81, 0x72, 0x12, 0x22, 0xf0, 0x88, 0x3c, 0xf4, 0x05, 0x2c, 0xc6, 0xca, 0xee, 0x04, 0x51,
+	0x9f, 0x33, 0x13, 0xa4, 0xd5, 0x8d, 0x29, 0xad, 0x56, 0x20, 0x65, 0xf6, 0x98, 0x24, 0xe1, 0xec,
+	0x9d, 0x80, 0x13, 0x1a, 0xb8, 0xbe, 0x0e, 0xda, 0x01, 0x8d, 0x76, 0x44, 0x6c, 0x8a, 0xc4, 0xbb,
+	0x2b, 0xd3, 0xad, 0x59, 0x95, 0xae, 0xb9, 0x96, 0xfe, 0x6a, 0x32, 0x3d, 0x3b, 0x8a, 0x19, 0x8f,
+	0x40, 0xd1, 0x1d, 0x28, 0xc7, 0x81, 0xc0, 0xcc, 0x05, 0xa9, 0xbd, 0x95, 0x96, 0x13, 0xb3, 0xe0,
+	0x21, 0xb3, 0xf5, 0x10, 0x2e, 0x9f, 0x16, 0x60, 0xe2, 0xc0, 0x1c, 0x91, 0x93, 0xf8, 0xc0, 0x1c,
+	0x91, 0x13, 0x91, 0xb3, 0x8e, 0x5d, 0xbf, 0xaf, 0x72, 0x59, 0x19, 0x2b, 0x62, 0x23, 0x77, 0xc7,
+	0xb0, 0xee, 0x03, 0x4a, 0x47, 0xc2, 0x4c, 0x12, 0x3e, 0x83, 0x0b, 0x19, 0x5e, 0xcd, 0x10, 0x71,
+	0x35, 0x29, 0x22, 0x7d, 0x60, 0x87, 0x22, 0xed, 0xbf, 0xe6, 0xa1, 0x9a, 0x8c, 0x2d, 0xb4, 0x0e,
+	0x17, 0x94, 0xc5, 0x98, 0x1c, 0x26, 0x0e, 0xa3, 0x12, 0x9e, 0xb5, 0x84, 0x1a, 0x70, 0x71, 0xa7,
+	0xa7, 0xa7, 0x93, 0xe7, 0x37, 0x27, 0x93, 0x4d, 0xe6, 0x1a, 0x0a, 0xe1, 0x03, 0x25, 0x6a, 0xfc,
+	0xd0, 0xe7, 0xe5, 0xee, 0x7c, 0x72, 0xfa, 0x01, 0x70, 0x32, 0xb1, 0x2a, 0xc4, 0xb2, 0xe5, 0xa2,
+	0x9f, 0xc0, 0xbc, 0x5a, 0x60, 0x3a, 0xaf, 0x7c, 0x74, 0xfa, 0x27, 0x94, 0xb0, 0x18, 0x23, 0xe0,
+	0xca, 0x0e, 0x66, 0xce, 0xcd, 0x00, 0xd7, 0x18, 0xeb, 0x01, 0x58, 0x93, 0x55, 0x9e, 0x25, 0x04,
+	0xec, 0xbf, 0x18, 0xb0, 0x94, 0xfa, 0x90, 0xb8, 0x12, 0xe5, 0xa5, 0xa0, 0x44, 0xc8, 0x31, 0x6a,
+	0xc2, 0x9c, 0x4a, 0x52, 0x39, 0xa9, 0xb0, 0x33, 0x85, 0xc2, 0x4e, 0x22, 0x43, 0x29, 0xb0, 0x75,
+	0x07, 0xe0, 0xed, 0x82, 0xd5, 0xfe, 0x9b, 0x01, 0x0b, 0x3a, 0x21, 0xe8, 0xfa, 0xc1, 0x85, 0xda,
+	0xe0, 0x8c, 0xe9, 0x39, 0x5d, 0x49, 0xdc, 0x9a, 0x98, 0x4b, 0x14, 0x9b, 0x33, 0x8e, 0x53, 0x3a,
+	0xa6, 0xc4, 0x59, 0xdb, 0x71, 0x5c, 0x8d, 0xb1, 0xce, 0xa4, 0xf9, 0x15, 0x58, 0xd8, 0x93, 0x75,
+	0xea, 0xc4, 0x6b, 0xd1, 0xfe, 0xaf, 0x01, 0x8b, 0x31, 0x8f, 0xb6, 0xee, 0x87, 0x50, 0x3a, 0x26,
+	0x94, 0x93, 0x97, 0x84, 0x69, 0xab, 0xcc, 0xb4, 0x55, 0x9f, 0x4b, 0x0e, 0x3c, 0xe0, 0x44, 0x1b,
+	0x50, 0x52, 0x35, 0x31, 0x89, 0x37, 0x6a, 0x79, 0x12, 0x4a, 0x7f, 0x6f, 0xc0, 0x8f, 0xea, 0x50,
+	0xf0, 0xc3, 0x2e, 0xd3, 0x67, 0xe6, 0x3b, 0x93, 0x70, 0x8f, 0xc2, 0x2e, 0x96, 0x8c, 0xe8, 0x2e,
+	0x94, 0xbe, 0x74, 0x69, 0xe0, 0x05, 0xdd, 0xf8, 0x14, 0xac, 0x4c, 0x02, 0x3d, 0x53, 0x7c, 0x78,
+	0x00, 0x10, 0x65, 0x5c, 0x51, 0xad, 0xa1, 0x87, 0x50, 0xec, 0x78, 0x5d, 0xc2, 0xb8, 0x72, 0xc9,
+	0x56, 0x43, 0xdc, 0x47, 0x5f, 0xbf, 0x5e, 0xb9, 0x9e, 0xb8, 0x70, 0xc2, 0x88, 0x04, 0xa2, 0x69,
+	0x70, 0xbd, 0x80, 0x50, 0xd1, 0x03, 0xdc, 0x54, 0x10, 0xa7, 0x29, 0x7f, 0xb0, 0x96, 0x20, 0x64,
+	0x79, 0xea, 0x5a, 0x91, 0xf9, 0xe2, 0xed, 0x64, 0x29, 0x09, 0xe2, 0x18, 0x04, 0x6e, 0x8f, 0xe8,
+	0x72, 0x43, 0x8e, 0x45, 0x55, 0xd4, 0x16, 0x71, 0xde, 0x91, 0xf5, 0x62, 0x09, 0x6b, 0x0a, 0x6d,
+	0xc0, 0x3c, 0xe3, 0x2e, 0x15, 0x39, 0x67, 0x6e, 0xca, 0x72, 0x2e, 0x06, 0xa0, 0x7b, 0x50, 0x6e,
+	0x87, 0xbd, 0xc8, 0x27, 0x02, 0x5d, 0x9c, 0x12, 0x3d, 0x84, 0x88, 0xd0, 0x23, 0x94, 0x86, 0x54,
+	0x16, 0x92, 0x65, 0xac, 0x08, 0x74, 0x1b, 0x16, 0x22, 0x1a, 0x76, 0x29, 0x61, 0xec, 0x53, 0x1a,
+	0xf6, 0x23, 0x5d, 0x0c, 0x2c, 0x89, 0xe4, 0xbd, 0x9b, 0x5c, 0xc0, 0xa3, 0x7c, 0xf6, 0x7f, 0x72,
+	0x50, 0x4d, 0x86, 0x48, 0xaa, 0xc2, 0x7e, 0x08, 0x45, 0x15, 0x70, 0x2a, 0xd6, 0xdf, 0xce, 0xc7,
+	0x4a, 0x42, 0xa6, 0x8f, 0x4d, 0x98, 0x6f, 0xf7, 0xa9, 0x2c, 0xbf, 0x55, 0x51, 0x1e, 0x93, 0xc2,
+	0x52, 0x1e, 0x72, 0xd7, 0x97, 0x3e, 0xce, 0x63, 0x45, 0x88, 0x8a, 0x7c, 0xd0, 0x25, 0xcd, 0x56,
+	0x91, 0x0f, 0x60, 0xc9, 0xfd, 0x9b, 0x7f, 0xa7, 0xfd, 0x2b, 0xcd, 0xbc, 0x7f, 0xf6, 0x3f, 0x0c,
+	0x28, 0x0f, 0xce, 0x56, 0xc2, 0xbb, 0xc6, 0x3b, 0x7b, 0x77, 0xc4, 0x33, 0xb9, 0xb7, 0xf3, 0xcc,
+	0x25, 0x28, 0x32, 0x4e, 0x89, 0xdb, 0x53, 0xfd, 0x22, 0xd6, 0x94, 0xc8, 0x62, 0x3d, 0xd6, 0x95,
+	0x3b, 0x54, 0xc5, 0x62, 0x68, 0xff, 0xcf, 0x80, 0x85, 0x91, 0xe3, 0xfe, 0x5e, 0x6d, 0xb9, 0x08,
+	0x73, 0x3e, 0x39, 0x26, 0xaa, 0xa3, 0xcd, 0x63, 0x45, 0x88, 0x59, 0xf6, 0x22, 0xa4, 0x5c, 0x2a,
+	0x57, 0xc5, 0x8a, 0x10, 0x3a, 0x77, 0x08, 0x77, 0x3d, 0x5f, 0xe6, 0xa5, 0x2a, 0xd6, 0x94, 0xd0,
+	0xb9, 0x4f, 0x7d, 0x5d, 0xa3, 0x8b, 0x21, 0xb2, 0xa1, 0xe0, 0x05, 0x87, 0xa1, 0x0e, 0x1b, 0x59,
+	0xd9, 0xa8, 0x5a, 0x6f, 0x27, 0x38, 0x0c, 0xb1, 0x5c, 0x43, 0x57, 0xa0, 0x48, 0xdd, 0xa0, 0x4b,
+	0xe2, 0x02, 0xbd, 0x2c, 0xb8, 0xb0, 0x98, 0xc1, 0x7a, 0xc1, 0xb6, 0xa1, 0x2a, 0xbb, 0xe2, 0xc7,
+	0x84, 0x89, 0x1e, 0x4c, 0x84, 0x75, 0xc7, 0xe5, 0xae, 0x34, 0xbb, 0x8a, 0xe5, 0xd8, 0xbe, 0x01,
+	0xe8, 0x91, 0xc7, 0xf8, 0x33, 0xf9, 0xa6, 0xc0, 0xce, 0x6a, 0x99, 0xf7, 0xe0, 0xc2, 0x08, 0xb7,
+	0xbe, 0x16, 0x7e, 0x3c, 0xd6, 0x34, 0x5f, 0x4d, 0x67, 0x5c, 0xf9, 0x74, 0xe1, 0x28, 0xe0, 0x58,
+	0xef, 0xbc, 0x00, 0x15, 0x69, 0x97, 0xfa, 0xb6, 0xed, 0x42, 0x55, 0x91, 0x5a, 0xf8, 0x67, 0x70,
+	0x3e, 0x16, 0xf4, 0x39, 0xa1, 0xb2, 0x9d, 0x31, 0xa4, 0x5f, 0xbe, 0x37, 0xe9, 0x2b, 0x5b, 0xa3,
+	0xec, 0x78, 0x1c, 0x6f, 0x13, 0xb8, 0x20, 0x79, 0x1e, 0x78, 0x8c, 0x87, 0xf4, 0x24, 0xb6, 0x7a,
+	0x19, 0x60, 0xb3, 0xcd, 0xbd, 0x63, 0xf2, 0x24, 0xf0, 0xd5, 0x35, 0x5a, 0xc2, 0x89, 0x99, 0xf8,
+	0x8a, 0xcc, 0x0d, 0x3b, 0xc7, 0xcb, 0x50, 0x6e, 0xb9, 0xd4, 0x3f, 0x69, 0xbd, 0xf4, 0xb8, 0x6e,
+	0xe0, 0x87, 0x13, 0xf6, 0xef, 0x0d, 0x58, 0x4a, 0x7e, 0xa7, 0x75, 0x2c, 0xd2, 0xc5, 0x5d, 0x28,
+	0xf0, 0xb8, 0x8e, 0x59, 0xcc, 0x32, 0x22, 0x05, 0x11, 0xa5, 0x0e, 0x96, 0xa0, 0x84, 0xa7, 0xd5,
+	0xc1, 0xb9, 0x7a, 0x3a, 0x7c, 0xcc, 0xd3, 0xff, 0x2f, 0x01, 0x4a, 0x2f, 0x67, 0x74, 0xc4, 0xc9,
+	0x06, 0x31, 0x37, 0xd6, 0x20, 0x3e, 0x1f, 0x6f, 0x10, 0xd5, 0xd5, 0x7c, 0x7b, 0x1a, 0x4d, 0xa6,
+	0x68, 0x13, 0x47, 0xfa, 0x98, 0xc2, 0x0c, 0x7d, 0x0c, 0x5a, 0x8b, 0x6f, 0x1c, 0x75, 0xd7, 0xa1,
+	0x38, 0xa7, 0xd0, 0xa8, 0xed, 0xe8, 0xba, 0x42, 0xdf, 0x42, 0xf7, 0x66, 0x7b, 0x2d, 0x29, 0x8c,
+	0xbf, 0x94, 0x6c, 0x41, 0x65, 0x3b, 0x4e, 0x94, 0x33, 0x3c, 0x95, 0x24, 0x41, 0x68, 0x5d, 0x17,
+	0x36, 0x2a, 0x35, 0x5f, 0x4e, 0x9b, 0x18, 0x3f, 0x8b, 0x84, 0x54, 0x57, 0x36, 0x87, 0x19, 0xa5,
+	0x65, 0x59, 0x3a, 0x68, 0x63, 0x2a, 0xdf, 0x4f, 0x59, 0x5f, 0xa2, 0x4f, 0xa0, 0x88, 0x09, 0xeb,
+	0xfb, 0x5c, 0xbe, 0xbf, 0x54, 0x1a, 0x57, 0x26, 0x48, 0x57, 0x4c, 0xf2, 0xac, 0x6a, 0x00, 0xfa,
+	0x39, 0xcc, 0xab, 0x11, 0x33, 0x2b, 0x93, 0x9e, 0x0d, 0x32, 0x34, 0xd3, 0x18, 0xdd, 0x50, 0x68,
+	0x4a, 0x1c, 0xc7, 0x4f, 0x49, 0x40, 0xf4, 0xbb, 0xa0, 0x68, 0x8d, 0xe7, 0x70, 0x62, 0x06, 0x35,
+	0x60, 0x8e, 0x53, 0xb7, 0x4d, 0xcc, 0x85, 0x29, 0x5c, 0xa8, 0x58, 0x45, 0x62, 0x8b, 0xbc, 0x20,
+	0x20, 0x1d, 0x73, 0x51, 0x55, 0x4a, 0x8a, 0x42, 0xdf, 0x85, 0xc5, 0xa0, 0xdf, 0x93, 0xcd, 0x42,
+	0x67, 0x8f, 0x93, 0x88, 0x99, 0xe7, 0xe5, 0xf7, 0xc6, 0x66, 0xd1, 0x55, 0x58, 0x08, 0xfa, 0xbd,
+	0x7d, 0x71, 0xc3, 0x2b, 0xb6, 0x9a, 0x64, 0x1b, 0x9d, 0x44, 0x37, 0x60, 0x49, 0xe0, 0xe2, 0xdd,
+	0x56, 0x9c, 0x4b, 0x92, 0x33, 0xbd, 0xf0, 0x1e, 0x7a, 0xe6, 0xf7, 0xd1, 0x11, 0x58, 0xcf, 0xa1,
+	0x9a, 0xdc, 0x87, 0x0c, 0xec, 0xed, 0xd1, 0x8e, 0x7b, 0x8a, 0xb8, 0x48, 0x34, 0x1c, 0xcf, 0xe1,
+	0xdb, 0x4f, 0xa3, 0x8e, 0xcb, 0x49, 0x56, 0xe6, 0x4d, 0x67, 0xa0, 0x4b, 0x50, 0xdc, 0x55, 0x1b,
+	0xa5, 0xde, 0x4b, 0x35, 0x25, 0xe6, 0x9b, 0x44, 0x38, 0x4f, 0xa7, 0x5b, 0x4d, 0xd9, 0x97, 0xc1,
+	0xca, 0x12, 0xaf, 0x9c, 0x61, 0xff, 0x39, 0x07, 0x30, 0x0c, 0x06, 0xf4, 0x21, 0x40, 0x8f, 0x74,
+	0x3c, 0xf7, 0x57, 0x7c, 0xd8, 0x50, 0x96, 0xe5, 0x8c, 0xec, 0x2a, 0x87, 0xa5, 0x7f, 0xee, 0x9d,
+	0x4b, 0x7f, 0x04, 0x05, 0xe6, 0x7d, 0x45, 0x74, 0x99, 0x22, 0xc7, 0xe8, 0x09, 0x54, 0xdc, 0x20,
+	0x08, 0xb9, 0x0c, 0xe3, 0xb8, 0xd9, 0xbe, 0x79, 0x5a, 0xf8, 0x3a, 0x9b, 0x43, 0x7e, 0x75, 0x4a,
+	0x92, 0x12, 0xac, 0x7b, 0x50, 0x1b, 0x67, 0x98, 0xa9, 0x19, 0xfc, 0x7b, 0x0e, 0xce, 0x8f, 0x6d,
+	0x1d, 0x7a, 0x00, 0x35, 0x45, 0x8d, 0x3d, 0x90, 0x9c, 0x75, 0xd0, 0x52, 0x28, 0x74, 0x1f, 0xaa,
+	0x9b, 0x9c, 0x8b, 0x4c, 0xa8, 0xec, 0x55, 0x2d, 0xe0, 0xe9, 0x52, 0x46, 0x10, 0xe8, 0xc1, 0x30,
+	0xad, 0xe4, 0x27, 0x35, 0xfa, 0x63, 0xfa, 0x67, 0xe7, 0x14, 0xeb, 0x17, 0x93, 0x83, 0x3c, 0xaf,
+	0xbc, 0xd4, 0x18, 0x0d, 0xf2, 0x33, 0xb2, 0xca, 0xd0, 0x87, 0x7f, 0x34, 0xa0, 0x14, 0x1f, 0xc2,
+	0xcc, 0xb7, 0x8a, 0xbb, 0xa3, 0x6f, 0x15, 0xd7, 0x26, 0x5f, 0x6a, 0xef, 0xf3, 0x89, 0xe2, 0xfa,
+	0x4f, 0xe1, 0x83, 0xcc, 0x82, 0x02, 0x55, 0x60, 0x7e, 0x6f, 0x7f, 0x13, 0xef, 0xb7, 0x9a, 0xb5,
+	0x73, 0xa8, 0x0a, 0xa5, 0xed, 0x27, 0x8f, 0x77, 0x1f, 0xb5, 0xf6, 0x5b, 0x35, 0x43, 0x2c, 0x35,
+	0x5b, 0x62, 0xdc, 0xac, 0xe5, 0x1a, 0xbf, 0x2b, 0xc2, 0xfc, 0xb6, 0xfa, 0x67, 0x0c, 0xed, 0x43,
+	0x79, 0xf0, 0x97, 0x09, 0xb2, 0x33, 0x5c, 0x33, 0xf6, 0xdf, 0x8b, 0xf5, 0xd1, 0xa9, 0x3c, 0xfa,
+	0xc2, 0x79, 0x00, 0x73, 0xf2, 0xcf, 0x23, 0x94, 0xf1, 0x2c, 0x90, 0xfc, 0x57, 0xc9, 0x3a, 0xfd,
+	0xcf, 0x98, 0x75, 0x43, 0x48, 0x92, 0x6f, 0x2a, 0x59, 0x92, 0x92, 0x0f, 0xb7, 0xd6, 0xca, 0x19,
+	0x8f, 0x31, 0xe8, 0x31, 0x14, 0x75, 0xa3, 0x99, 0xc5, 0x9a, 0x7c, 0x39, 0xb1, 0x56, 0x27, 0x33,
+	0x28, 0x61, 0xeb, 0x06, 0x7a, 0x3c, 0x78, 0x8b, 0xcf, 0x52, 0x2d, 0x59, 0xa5, 0x5b, 0x67, 0xac,
+	0xaf, 0x19, 0xeb, 0x06, 0xfa, 0x02, 0x2a, 0x89, 0x3a, 0x1c, 0x65, 0x54, 0x81, 0xe9, 0xa2, 0xde,
+	0xba, 0x76, 0x06, 0x97, 0xb6, 0xbc, 0x05, 0x05, 0x99, 0x00, 0x32, 0x9c, 0x9d, 0x28, 0xd3, 0xb3,
+	0xd4, 0x1c, 0x29, 0xdb, 0x0f, 0x54, 0x63, 0x41, 0x82, 0x64, 0xf4, 0xa1, 0x6b, 0x67, 0xd5, 0x03,
+	0x13, 0xc3, 0x26, 0x15, 0xc4, 0xeb, 0x06, 0x0a, 0x01, 0xa5, 0x93, 0x3e, 0xfa, 0x7e, 0x46, 0x94,
+	0x4c, 0xba, 0x79, 0xac, 0x1b, 0xd3, 0x31, 0x2b, 0xa3, 0xb6, 0xaa, 0xaf, 0xde, 0x2c, 0x1b, 0xff,
+	0x7c, 0xb3, 0x6c, 0xfc, 0xfb, 0xcd, 0xb2, 0x71, 0x50, 0x94, 0x95, 0xde, 0x0f, 0xbe, 0x09, 0x00,
+	0x00, 0xff, 0xff, 0x16, 0xc8, 0xe5, 0x4c, 0x39, 0x1e, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -2892,6 +2918,20 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i -= len(m.XXX_unrecognized)
 		copy(dAtA[i:], m.XXX_unrecognized)
 	}
+	if len(m.Exporters) > 0 {
+		for iNdEx := len(m.Exporters) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Exporters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintControl(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x6a
+		}
+	}
 	if m.SourcePolicy != nil {
 		{
 			size, err := m.SourcePolicy.MarshalToSizedBuffer(dAtA[:i])
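The magic byte written for the new field above follows the usual protobuf rule: a tag byte is (field_number << 3) | wire_type, so 0x6a is field 13 (Exporters) with wire type 2 (length-delimited). The same arithmetic produces the 0x1a/0x08/0x12 bytes in the BuildResultInfo.Results map marshalling later in this file:

	const (
		tagExporters     = (13 << 3) | 2 // 0x6a: SolveRequest.Exporters, length-delimited
		tagResultsEntry  = (3 << 3) | 2  // 0x1a: BuildResultInfo.Results entry
		tagMapKeyVarint  = (1 << 3) | 0  // 0x08: map entry key, varint
		tagMapValueBytes = (2 << 3) | 2  // 0x12: map entry value, length-delimited
	)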
@@ -2992,9 +3032,9 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i--
 		dAtA[i] = 0x2a
 	}
-	if len(m.ExporterAttrs) > 0 {
-		for k := range m.ExporterAttrs {
-			v := m.ExporterAttrs[k]
+	if len(m.ExporterAttrsDeprecated) > 0 {
+		for k := range m.ExporterAttrsDeprecated {
+			v := m.ExporterAttrsDeprecated[k]
 			baseI := i
 			i -= len(v)
 			copy(dAtA[i:], v)
@@ -3011,10 +3051,10 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 			dAtA[i] = 0x22
 		}
 	}
-	if len(m.Exporter) > 0 {
-		i -= len(m.Exporter)
-		copy(dAtA[i:], m.Exporter)
-		i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter)))
+	if len(m.ExporterDeprecated) > 0 {
+		i -= len(m.ExporterDeprecated)
+		copy(dAtA[i:], m.ExporterDeprecated)
+		i = encodeVarintControl(dAtA, i, uint64(len(m.ExporterDeprecated)))
 		i--
 		dAtA[i] = 0x1a
 	}
@@ -4339,6 +4379,30 @@ func (m *BuildResultInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 		i -= len(m.XXX_unrecognized)
 		copy(dAtA[i:], m.XXX_unrecognized)
 	}
+	if len(m.Results) > 0 {
+		for k := range m.Results {
+			v := m.Results[k]
+			baseI := i
+			if v != nil {
+				{
+					size, err := v.MarshalToSizedBuffer(dAtA[:i])
+					if err != nil {
+						return 0, err
+					}
+					i -= size
+					i = encodeVarintControl(dAtA, i, uint64(size))
+				}
+				i--
+				dAtA[i] = 0x12
+			}
+			i = encodeVarintControl(dAtA, i, uint64(k))
+			i--
+			dAtA[i] = 0x8
+			i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
 	if len(m.Attestations) > 0 {
 		for iNdEx := len(m.Attestations) - 1; iNdEx >= 0; iNdEx-- {
 			{
@@ -4353,9 +4417,9 @@ func (m *BuildResultInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
 			dAtA[i] = 0x12
 		}
 	}
-	if m.Result != nil {
+	if m.ResultDeprecated != nil {
 		{
-			size, err := m.Result.MarshalToSizedBuffer(dAtA[:i])
+			size, err := m.ResultDeprecated.MarshalToSizedBuffer(dAtA[:i])
 			if err != nil {
 				return 0, err
 			}
@@ -4564,12 +4628,12 @@ func (m *SolveRequest) Size() (n int) {
 		l = m.Definition.Size()
 		n += 1 + l + sovControl(uint64(l))
 	}
-	l = len(m.Exporter)
+	l = len(m.ExporterDeprecated)
 	if l > 0 {
 		n += 1 + l + sovControl(uint64(l))
 	}
-	if len(m.ExporterAttrs) > 0 {
-		for k, v := range m.ExporterAttrs {
+	if len(m.ExporterAttrsDeprecated) > 0 {
+		for k, v := range m.ExporterAttrsDeprecated {
 			_ = k
 			_ = v
 			mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
@@ -4620,6 +4684,12 @@ func (m *SolveRequest) Size() (n int) {
 		l = m.SourcePolicy.Size()
 		n += 1 + l + sovControl(uint64(l))
 	}
+	if len(m.Exporters) > 0 {
+		for _, e := range m.Exporters {
+			l = e.Size()
+			n += 1 + l + sovControl(uint64(l))
+		}
+	}
 	if m.XXX_unrecognized != nil {
 		n += len(m.XXX_unrecognized)
 	}
@@ -5203,8 +5273,8 @@ func (m *BuildResultInfo) Size() (n int) {
 	}
 	var l int
 	_ = l
-	if m.Result != nil {
-		l = m.Result.Size()
+	if m.ResultDeprecated != nil {
+		l = m.ResultDeprecated.Size()
 		n += 1 + l + sovControl(uint64(l))
 	}
 	if len(m.Attestations) > 0 {
@@ -5213,6 +5283,19 @@ func (m *BuildResultInfo) Size() (n int) {
 			n += 1 + l + sovControl(uint64(l))
 		}
 	}
+	if len(m.Results) > 0 {
+		for k, v := range m.Results {
+			_ = k
+			_ = v
+			l = 0
+			if v != nil {
+				l = v.Size()
+				l += 1 + sovControl(uint64(l))
+			}
+			mapEntrySize := 1 + sovControl(uint64(k)) + l
+			n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+		}
+	}
 	if m.XXX_unrecognized != nil {
 		n += len(m.XXX_unrecognized)
 	}
@@ -6035,7 +6118,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
 			iNdEx = postIndex
 		case 3:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Exporter", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ExporterDeprecated", wireType)
 			}
 			var stringLen uint64
 			for shift := uint(0); ; shift += 7 {
@@ -6063,11 +6146,11 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Exporter = string(dAtA[iNdEx:postIndex])
+			m.ExporterDeprecated = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 4:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrsDeprecated", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -6094,8 +6177,8 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.ExporterAttrs == nil {
-				m.ExporterAttrs = make(map[string]string)
+			if m.ExporterAttrsDeprecated == nil {
+				m.ExporterAttrsDeprecated = make(map[string]string)
 			}
 			var mapkey string
 			var mapvalue string
@@ -6190,7 +6273,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
 					iNdEx += skippy
 				}
 			}
-			m.ExporterAttrs[mapkey] = mapvalue
+			m.ExporterAttrsDeprecated[mapkey] = mapvalue
 			iNdEx = postIndex
 		case 5:
 			if wireType != 2 {
@@ -6633,6 +6716,40 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exporters", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowControl
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthControl
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthControl
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Exporters = append(m.Exporters, &Exporter{})
+			if err := m.Exporters[len(m.Exporters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipControl(dAtA[iNdEx:])
@@ -10589,7 +10706,7 @@ func (m *BuildResultInfo) Unmarshal(dAtA []byte) error {
 		switch fieldNum {
 		case 1:
 			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+				return fmt.Errorf("proto: wrong wireType = %d for field ResultDeprecated", wireType)
 			}
 			var msglen int
 			for shift := uint(0); ; shift += 7 {
@@ -10616,10 +10733,10 @@ func (m *BuildResultInfo) Unmarshal(dAtA []byte) error {
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			if m.Result == nil {
-				m.Result = &Descriptor{}
+			if m.ResultDeprecated == nil {
+				m.ResultDeprecated = &Descriptor{}
 			}
-			if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+			if err := m.ResultDeprecated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -10657,6 +10774,121 @@ func (m *BuildResultInfo) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowControl
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthControl
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthControl
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Results == nil {
+				m.Results = make(map[int64]*Descriptor)
+			}
+			var mapkey int64
+			var mapvalue *Descriptor
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowControl
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowControl
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapkey |= int64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+				} else if fieldNum == 2 {
+					var mapmsglen int
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowControl
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						mapmsglen |= int(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					if mapmsglen < 0 {
+						return ErrInvalidLengthControl
+					}
+					postmsgIndex := iNdEx + mapmsglen
+					if postmsgIndex < 0 {
+						return ErrInvalidLengthControl
+					}
+					if postmsgIndex > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = &Descriptor{}
+					if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+						return err
+					}
+					iNdEx = postmsgIndex
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipControl(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthControl
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Results[mapkey] = mapvalue
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipControl(dAtA[iNdEx:])

+ 15 - 7
vendor/github.com/moby/buildkit/api/services/control/control.proto

@@ -2,13 +2,13 @@ syntax = "proto3";
 
 package moby.buildkit.v1;
 
-import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-import "google/protobuf/timestamp.proto";
-import "github.com/moby/buildkit/solver/pb/ops.proto";
-import "github.com/moby/buildkit/api/types/worker.proto";
 // import "github.com/containerd/containerd/api/types/descriptor.proto";
 import "github.com/gogo/googleapis/google/rpc/status.proto";
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+import "github.com/moby/buildkit/api/types/worker.proto";
+import "github.com/moby/buildkit/solver/pb/ops.proto";
 import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto";
+import "google/protobuf/timestamp.proto";
 
 option (gogoproto.sizer_all) = true;
 option (gogoproto.marshaler_all) = true;
@@ -60,8 +60,11 @@ message UsageRecord {
 message SolveRequest {
 	string Ref = 1;
 	pb.Definition Definition = 2;
-	string Exporter = 3;
-	map<string, string> ExporterAttrs = 4;
+	// ExporterDeprecated and ExporterAttrsDeprecated are deprecated in favor
+	// of the new Exporters. If these fields are set, then they will be
+	// appended to the Exporters field if Exporters was not explicitly set.
+	string ExporterDeprecated = 3;
+	map<string, string> ExporterAttrsDeprecated = 4;
 	string Session = 5;
 	string Frontend = 6;
 	map<string, string> FrontendAttrs = 7;
@@ -70,6 +73,7 @@ message SolveRequest {
 	map<string, pb.Definition> FrontendInputs = 10;
 	bool Internal = 11; // Internal builds are not recorded in build history
 	moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 12;
+	repeated Exporter Exporters = 13;
 }
 
 message CacheOptions {
@@ -227,11 +231,15 @@ message Descriptor {
 }
 
 message BuildResultInfo {
-	Descriptor Result = 1;
+	Descriptor ResultDeprecated = 1;
 	repeated Descriptor Attestations = 2;
+	map<int64, Descriptor> Results = 3;
 }
 
+// Exporter describes the output exporter
 message Exporter {
+	// Type identifies the exporter
 	string Type = 1;
+	// Attrs specifies exporter configuration
 	map<string, string> Attrs = 2;
 }

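For reference, a minimal sketch of how a client might populate the new repeated Exporters field, assuming the generated Go package is imported as controlapi; the image name and output path are illustrative, not taken from this change:

```go
package example

import (
	controlapi "github.com/moby/buildkit/api/services/control"
)

// newSolveRequest fills the new Exporters field (field 13). Older clients
// would instead set ExporterDeprecated/ExporterAttrsDeprecated (fields 3/4),
// which, per the comment above, are folded into Exporters when that field is
// left unset.
func newSolveRequest(ref string) *controlapi.SolveRequest {
	return &controlapi.SolveRequest{
		Ref: ref,
		Exporters: []*controlapi.Exporter{
			{Type: "image", Attrs: map[string]string{"name": "registry.example.com/app:latest", "push": "true"}},
			{Type: "local", Attrs: map[string]string{"dest": "/tmp/build-output"}},
		},
	}
}
```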
+ 74 - 37
vendor/github.com/moby/buildkit/cache/blobs.go

@@ -8,12 +8,15 @@ import (
 
 	"github.com/containerd/containerd/diff"
 	"github.com/containerd/containerd/diff/walking"
+	"github.com/containerd/containerd/labels"
 	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/mount"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/compression"
+	"github.com/moby/buildkit/util/converter"
 	"github.com/moby/buildkit/util/flightcontrol"
+	"github.com/moby/buildkit/util/leaseutil"
 	"github.com/moby/buildkit/util/winlayers"
 	digest "github.com/opencontainers/go-digest"
 	imagespecidentity "github.com/opencontainers/image-spec/identity"
@@ -22,11 +25,9 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-var g flightcontrol.Group[struct{}]
+var g flightcontrol.Group[*leaseutil.LeaseRef]
 var gFileList flightcontrol.Group[[]string]
 
-const containerdUncompressed = "containerd.io/uncompressed"
-
 var ErrNoBlobs = errors.Errorf("no blobs for snapshot")
 
 // computeBlobChain ensures every ref in a parent chain has an associated blob in the content store. If
@@ -87,13 +88,23 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 
 	if _, ok := filter[sr.ID()]; ok {
 		eg.Go(func() error {
-			_, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (struct{}, error) {
+			l, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (_ *leaseutil.LeaseRef, err error) {
 				if sr.getBlob() != "" {
-					return struct{}{}, nil
+					return nil, nil
 				}
 				if !createIfNeeded {
-					return struct{}{}, errors.WithStack(ErrNoBlobs)
+					return nil, errors.WithStack(ErrNoBlobs)
+				}
+
+				l, ctx, err := leaseutil.NewLease(ctx, sr.cm.LeaseManager, leaseutil.MakeTemporary)
+				if err != nil {
+					return nil, err
 				}
+				defer func() {
+					if err != nil {
+						l.Discard()
+					}
+				}()
 
 				compressorFunc, finalize := comp.Type.Compress(ctx, comp)
 				mediaType := comp.Type.MediaType()
@@ -109,12 +120,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				if lowerRef != nil {
 					m, err := lowerRef.Mount(ctx, true, s)
 					if err != nil {
-						return struct{}{}, err
+						return nil, err
 					}
 					var release func() error
 					lower, release, err = m.Mount()
 					if err != nil {
-						return struct{}{}, err
+						return nil, err
 					}
 					if release != nil {
 						defer release()
@@ -132,12 +143,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				if upperRef != nil {
 					m, err := upperRef.Mount(ctx, true, s)
 					if err != nil {
-						return struct{}{}, err
+						return nil, err
 					}
 					var release func() error
 					upper, release, err = m.Mount()
 					if err != nil {
-						return struct{}{}, err
+						return nil, err
 					}
 					if release != nil {
 						defer release()
@@ -145,14 +156,13 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				}
 
 				var desc ocispecs.Descriptor
-				var err error
 
 				// Determine differ and error/log handling according to the platform, envvar and the snapshotter.
 				var enableOverlay, fallback, logWarnOnErr bool
 				if forceOvlStr := os.Getenv("BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF"); forceOvlStr != "" && sr.kind() != Diff {
 					enableOverlay, err = strconv.ParseBool(forceOvlStr)
 					if err != nil {
-						return struct{}{}, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF")
+						return nil, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF")
 					}
 					fallback = false // prohibit fallback on debug
 				} else if !isTypeWindows(sr) {
@@ -174,10 +184,10 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 					if !ok || err != nil {
 						if !fallback {
 							if !ok {
-								return struct{}{}, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper)
+								return nil, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper)
 							}
 							if err != nil {
-								return struct{}{}, errors.Wrapf(err, "failed to compute overlay diff")
+								return nil, errors.Wrapf(err, "failed to compute overlay diff")
 							}
 						}
 						if logWarnOnErr {
@@ -189,7 +199,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 					}
 				}
 
-				if desc.Digest == "" && !isTypeWindows(sr) && comp.Type.NeedsComputeDiffBySelf() {
+				if desc.Digest == "" && !isTypeWindows(sr) && comp.Type.NeedsComputeDiffBySelf(comp) {
 					// These compression types aren't supported by containerd differ. So try to compute diff on buildkit side.
 					// This case can happen on containerd worker + non-overlayfs snapshotter (e.g. native).
 					// See also: https://github.com/containerd/containerd/issues/4263
@@ -210,7 +220,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 						diff.WithCompressor(compressorFunc),
 					)
 					if err != nil {
-						return struct{}{}, err
+						return nil, err
 					}
 				}
 
@@ -220,7 +230,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				if finalize != nil {
 					a, err := finalize(ctx, sr.cm.ContentStore)
 					if err != nil {
-						return struct{}{}, errors.Wrapf(err, "failed to finalize compression")
+						return nil, errors.Wrapf(err, "failed to finalize compression")
 					}
 					for k, v := range a {
 						desc.Annotations[k] = v
@@ -228,26 +238,32 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				}
 				info, err := sr.cm.ContentStore.Info(ctx, desc.Digest)
 				if err != nil {
-					return struct{}{}, err
+					return nil, err
 				}
 
-				if diffID, ok := info.Labels[containerdUncompressed]; ok {
-					desc.Annotations[containerdUncompressed] = diffID
+				if diffID, ok := info.Labels[labels.LabelUncompressed]; ok {
+					desc.Annotations[labels.LabelUncompressed] = diffID
 				} else if mediaType == ocispecs.MediaTypeImageLayer {
-					desc.Annotations[containerdUncompressed] = desc.Digest.String()
+					desc.Annotations[labels.LabelUncompressed] = desc.Digest.String()
 				} else {
-					return struct{}{}, errors.Errorf("unknown layer compression type")
+					return nil, errors.Errorf("unknown layer compression type")
 				}
 
 				if err := sr.setBlob(ctx, desc); err != nil {
-					return struct{}{}, err
+					return nil, err
 				}
-				return struct{}{}, nil
+				return l, nil
 			})
 			if err != nil {
 				return err
 			}
 
+			if l != nil {
+				if err := l.Adopt(ctx); err != nil {
+					return err
+				}
+			}
+
 			if comp.Force {
 				if err := ensureCompression(ctx, sr, comp, s); err != nil {
 					return errors.Wrapf(err, "failed to ensure compression type of %q", comp.Type)
@@ -416,29 +432,42 @@ func isTypeWindows(sr *immutableRef) bool {
 
 // ensureCompression ensures the specified ref has the blob of the specified compression Type.
 func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error {
-	_, err := g.Do(ctx, fmt.Sprintf("ensureComp-%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (struct{}, error) {
+	l, err := g.Do(ctx, fmt.Sprintf("ensureComp-%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (_ *leaseutil.LeaseRef, err error) {
 		desc, err := ref.ociDesc(ctx, ref.descHandlers, true)
 		if err != nil {
-			return struct{}{}, err
+			return nil, err
 		}
 
+		l, ctx, err := leaseutil.NewLease(ctx, ref.cm.LeaseManager, leaseutil.MakeTemporary)
+		if err != nil {
+			return nil, err
+		}
+		defer func() {
+			if err != nil {
+				l.Discard()
+			}
+		}()
+
 		// Resolve converters
-		layerConvertFunc, err := getConverter(ctx, ref.cm.ContentStore, desc, comp)
+		layerConvertFunc, err := converter.New(ctx, ref.cm.ContentStore, desc, comp)
 		if err != nil {
-			return struct{}{}, err
+			return nil, err
 		} else if layerConvertFunc == nil {
 			if isLazy, err := ref.isLazy(ctx); err != nil {
-				return struct{}{}, err
+				return nil, err
 			} else if isLazy {
 				// This ref can be used as the specified compressionType. Keep it lazy.
-				return struct{}{}, nil
+				return l, nil
 			}
-			return struct{}{}, ref.linkBlob(ctx, desc)
+			if err := ref.linkBlob(ctx, desc); err != nil {
+				return nil, err
+			}
+			return l, nil
 		}
 
 		// First, lookup local content store
 		if _, err := ref.getBlobWithCompression(ctx, comp.Type); err == nil {
-			return struct{}{}, nil // found the compression variant. no need to convert.
+			return l, nil // found the compression variant. no need to convert.
 		}
 
 		// Convert layer compression type
@@ -448,18 +477,26 @@ func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.
 			dh:      ref.descHandlers[desc.Digest],
 			session: s,
 		}).Unlazy(ctx); err != nil {
-			return struct{}{}, err
+			return l, err
 		}
 		newDesc, err := layerConvertFunc(ctx, ref.cm.ContentStore, desc)
 		if err != nil {
-			return struct{}{}, errors.Wrapf(err, "failed to convert")
+			return nil, errors.Wrapf(err, "failed to convert")
 		}
 
 		// Start to track converted layer
 		if err := ref.linkBlob(ctx, *newDesc); err != nil {
-			return struct{}{}, errors.Wrapf(err, "failed to add compression blob")
+			return nil, errors.Wrapf(err, "failed to add compression blob")
 		}
-		return struct{}{}, nil
+		return l, nil
 	})
-	return err
+	if err != nil {
+		return err
+	}
+	if l != nil {
+		if err := l.Adopt(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
 }

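The net effect of the blobs.go changes above is that blob creation and compression conversion now run under a temporary lease which the winning flightcontrol caller adopts on success. A compressed recap of that flow, with error handling trimmed and lm/createBlob as hypothetical stand-ins (only the leaseutil calls mirror the diff):

```go
package example

import (
	"context"

	"github.com/containerd/containerd/leases"
	"github.com/moby/buildkit/util/leaseutil"
)

func withTemporaryLease(ctx context.Context, lm leases.Manager, createBlob func(context.Context) error) (err error) {
	l, ctx, err := leaseutil.NewLease(ctx, lm, leaseutil.MakeTemporary)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			l.Discard() // drop the temporary lease so a partially written blob can be GC'd
		}
	}()
	if err = createBlob(ctx); err != nil {
		return err
	}
	return l.Adopt(ctx) // promote the lease so the new blob outlives this call
}
```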
+ 18 - 4
vendor/github.com/moby/buildkit/cache/blobs_linux.go

@@ -10,6 +10,7 @@ import (
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
+	labelspkg "github.com/containerd/containerd/labels"
 	"github.com/containerd/containerd/mount"
 	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/compression"
@@ -42,14 +43,27 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
 	if err != nil {
 		return emptyDesc, false, errors.Wrap(err, "failed to open writer")
 	}
+
 	defer func() {
 		if cw != nil {
+			// After a successful commit cw is set to nil; if cw is still non-nil
+			// here, an error occurred before the commit and we should abort this
+			// ingest. Because the error may have been caused by ctx being cancelled,
+			// use a fresh context. And since cw.Close will unlock this ref in the
+			// content store, we invoke Abort first to remove the ingest root in advance.
+			if aerr := sr.cm.ContentStore.Abort(context.Background(), ref); aerr != nil {
+				bklog.G(ctx).WithError(aerr).Warnf("failed to abort writer %q", ref)
+			}
 			if cerr := cw.Close(); cerr != nil {
 				bklog.G(ctx).WithError(cerr).Warnf("failed to close writer %q", ref)
 			}
 		}
 	}()
 
+	if err = cw.Truncate(0); err != nil {
+		return emptyDesc, false, errors.Wrap(err, "failed to truncate writer")
+	}
+
 	bufW := bufio.NewWriterSize(cw, 128*1024)
 	var labels map[string]string
 	if compressorFunc != nil {
@@ -69,7 +83,7 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
 		if labels == nil {
 			labels = map[string]string{}
 		}
-		labels[containerdUncompressed] = dgstr.Digest().String()
+		labels[labelspkg.LabelUncompressed] = dgstr.Digest().String()
 	} else {
 		if err = overlay.WriteUpperdir(ctx, bufW, upperdir, lower); err != nil {
 			return emptyDesc, false, errors.Wrap(err, "failed to write diff")
@@ -101,9 +115,9 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
 		cinfo.Labels = make(map[string]string)
 	}
 	// Set uncompressed label if digest already existed without label
-	if _, ok := cinfo.Labels[containerdUncompressed]; !ok {
-		cinfo.Labels[containerdUncompressed] = labels[containerdUncompressed]
-		if _, err := sr.cm.ContentStore.Update(ctx, cinfo, "labels."+containerdUncompressed); err != nil {
+	if _, ok := cinfo.Labels[labelspkg.LabelUncompressed]; !ok {
+		cinfo.Labels[labelspkg.LabelUncompressed] = labels[labelspkg.LabelUncompressed]
+		if _, err := sr.cm.ContentStore.Update(ctx, cinfo, "labels."+labelspkg.LabelUncompressed); err != nil {
 			return emptyDesc, false, errors.Wrap(err, "error setting uncompressed label")
 		}
 	}

+ 1 - 1
vendor/github.com/moby/buildkit/cache/blobs_nolinux.go

@@ -6,8 +6,8 @@ package cache
 import (
 	"context"
 
-	"github.com/moby/buildkit/util/compression"
 	"github.com/containerd/containerd/mount"
+	"github.com/moby/buildkit/util/compression"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )

+ 0 - 16
vendor/github.com/moby/buildkit/cache/compression.go

@@ -1,16 +0,0 @@
-//go:build !nydus
-// +build !nydus
-
-package cache
-
-import (
-	"context"
-
-	"github.com/containerd/containerd/content"
-	"github.com/moby/buildkit/cache/config"
-	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool {
-	return refCfg.Compression.Force
-}

+ 3 - 19
vendor/github.com/moby/buildkit/cache/compression_nydus.go

@@ -10,7 +10,7 @@ import (
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
-	"github.com/moby/buildkit/cache/config"
+	"github.com/containerd/containerd/labels"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/util/compression"
 	digest "github.com/opencontainers/go-digest"
@@ -27,20 +27,6 @@ func init() {
 	)
 }
 
-// Nydus compression type can't be mixed with other compression types in the same image,
-// so if `source` is this kind of layer, but the target is other compression type, we
-// should do the forced compression.
-func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool {
-	if refCfg.Compression.Force {
-		return true
-	}
-	isNydusBlob, _ := compression.Nydus.Is(ctx, cs, source)
-	if refCfg.Compression.Type == compression.Nydus {
-		return !isNydusBlob
-	}
-	return isNydusBlob
-}
-
 // MergeNydus does two steps:
 // 1. Extracts nydus bootstrap from nydus format (nydus blob + nydus bootstrap) for each layer.
 // 2. Merge all nydus bootstraps into a final bootstrap (will as an extra layer).
@@ -58,7 +44,6 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
 	// Extracts nydus bootstrap from nydus format for each layer.
 	var cm *cacheManager
 	layers := []converter.Layer{}
-	blobIDs := []string{}
 	for _, ref := range refs {
 		blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s)
 		if err != nil {
@@ -72,7 +57,6 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
 		if cm == nil {
 			cm = ref.cm
 		}
-		blobIDs = append(blobIDs, blobDesc.Digest.Hex())
 		layers = append(layers, converter.Layer{
 			Digest:   blobDesc.Digest,
 			ReaderAt: ra,
@@ -109,7 +93,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
 
 	compressedDgst := cw.Digest()
 	if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{
-		containerdUncompressed: uncompressedDgst.Digest().String(),
+		labels.LabelUncompressed: uncompressedDgst.Digest().String(),
 	})); err != nil {
 		if !errdefs.IsAlreadyExists(err) {
 			return nil, errors.Wrap(err, "commit to content store")
@@ -129,7 +113,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
 		Size:      info.Size,
 		MediaType: ocispecs.MediaTypeImageLayerGzip,
 		Annotations: map[string]string{
-			containerdUncompressed: uncompressedDgst.Digest().String(),
+			labels.LabelUncompressed: uncompressedDgst.Digest().String(),
 			// Use this annotation to identify nydus bootstrap layer.
 			converter.LayerAnnotationNydusBootstrap: "true",
 		},

+ 2 - 2
vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go

@@ -20,8 +20,8 @@ func setUnixOpt(path string, fi os.FileInfo, stat *fstypes.Stat) error {
 	stat.Gid = s.Gid
 
 	if !fi.IsDir() {
-		if s.Mode&syscall.S_IFBLK != 0 ||
-			s.Mode&syscall.S_IFCHR != 0 {
+		if s.Mode&syscall.S_IFLNK == 0 && (s.Mode&syscall.S_IFBLK != 0 ||
+			s.Mode&syscall.S_IFCHR != 0) {
 			stat.Devmajor = int64(unix.Major(uint64(s.Rdev)))
 			stat.Devminor = int64(unix.Minor(uint64(s.Rdev)))
 		}

+ 1 - 1
vendor/github.com/moby/buildkit/cache/filelist.go

@@ -35,7 +35,7 @@ func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string
 		}
 
 		// lazy blobs need to be pulled first
-		if err := sr.Extract(ctx, s); err != nil {
+		if err := sr.ensureLocalContentBlob(ctx, s); err != nil {
 			return nil, err
 		}
 

+ 41 - 36
vendor/github.com/moby/buildkit/cache/manager.go

@@ -13,6 +13,7 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/filters"
 	"github.com/containerd/containerd/gc"
+	"github.com/containerd/containerd/labels"
 	"github.com/containerd/containerd/leases"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/cache/metadata"
@@ -36,6 +37,8 @@ var (
 	errInvalid  = errors.New("invalid")
 )
 
+const maxPruneBatch = 10 // maximum number of refs to prune while holding the manager lock
+
 type ManagerOpt struct {
 	Snapshotter     snapshot.Snapshotter
 	ContentStore    content.Store
@@ -300,7 +303,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,
 
 	ref := rec.ref(true, descHandlers, nil)
 	if s := unlazySessionOf(opts...); s != nil {
-		if err := ref.unlazy(ctx, ref.descHandlers, ref.progress, s, true); err != nil {
+		if err := ref.unlazy(ctx, ref.descHandlers, ref.progress, s, true, false); err != nil {
 			return nil, err
 		}
 	}
@@ -321,6 +324,7 @@ func (cm *cacheManager) init(ctx context.Context) error {
 			bklog.G(ctx).Debugf("could not load snapshot %s: %+v", si.ID(), err)
 			cm.MetadataStore.Clear(si.ID())
 			cm.LeaseManager.Delete(ctx, leases.Lease{ID: si.ID()})
+			cm.LeaseManager.Delete(ctx, leases.Lease{ID: si.ID() + "-variants"})
 		}
 	}
 	return nil
@@ -1055,7 +1059,7 @@ func (cm *cacheManager) pruneOnce(ctx context.Context, ch chan client.UsageInfo,
 	})
 }
 
-func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt pruneOpt) error {
+func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt pruneOpt) (err error) {
 	var toDelete []*deleteRecord
 
 	if opt.keepBytes != 0 && opt.totalSize < opt.keepBytes {
@@ -1128,48 +1132,49 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt
 					lastUsedAt:  c.LastUsedAt,
 					usageCount:  c.UsageCount,
 				})
-				if !gcMode {
-					cr.dead = true
-
-					// mark metadata as deleted in case we crash before cleanup finished
-					if err := cr.queueDeleted(); err != nil {
-						cr.mu.Unlock()
-						cm.mu.Unlock()
-						return err
-					}
-					if err := cr.commitMetadata(); err != nil {
-						cr.mu.Unlock()
-						cm.mu.Unlock()
-						return err
-					}
-				} else {
-					locked[cr.mu] = struct{}{}
-					continue // leave the record locked
-				}
+				locked[cr.mu] = struct{}{}
+				continue // leave the record locked
 			}
 		}
 		cr.mu.Unlock()
 	}
 
+	batchSize := len(toDelete)
 	if gcMode && len(toDelete) > 0 {
+		batchSize = 1
 		sortDeleteRecords(toDelete)
-		var err error
-		for i, cr := range toDelete {
-			// only remove single record at a time
-			if i == 0 {
-				cr.dead = true
-				err = cr.queueDeleted()
-				if err == nil {
-					err = cr.commitMetadata()
-				}
+	} else if batchSize > maxPruneBatch {
+		batchSize = maxPruneBatch
+	}
+
+	releaseLocks := func() {
+		for _, cr := range toDelete {
+			if !cr.released {
+				cr.released = true
+				cr.mu.Unlock()
 			}
-			cr.mu.Unlock()
 		}
-		if err != nil {
-			return err
+		cm.mu.Unlock()
+	}
+
+	for i, cr := range toDelete {
+		// only remove single record at a time
+		if i < batchSize {
+			cr.dead = true
+			// mark metadata as deleted in case we crash before cleanup finished
+			if err := cr.queueDeleted(); err != nil {
+				releaseLocks()
+				return err
+			}
+			if err := cr.commitMetadata(); err != nil {
+				releaseLocks()
+				return err
+			}
 		}
-		toDelete = toDelete[:1]
+		cr.mu.Unlock()
+		cr.released = true
 	}
+	toDelete = toDelete[:batchSize]
 
 	cm.mu.Unlock()
 
@@ -1193,7 +1198,6 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt
 	}
 
 	cm.mu.Lock()
-	var err error
 	for _, cr := range toDelete {
 		cr.mu.Lock()
 
@@ -1254,7 +1258,7 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt
 
 	select {
 	case <-ctx.Done():
-		return ctx.Err()
+		return context.Cause(ctx)
 	default:
 		return cm.prune(ctx, ch, opt)
 	}
@@ -1611,6 +1615,7 @@ type deleteRecord struct {
 	usageCount      int
 	lastUsedAtIndex int
 	usageCountIndex int
+	released        bool
 }
 
 func sortDeleteRecords(toDelete []*deleteRecord) {
@@ -1657,7 +1662,7 @@ func sortDeleteRecords(toDelete []*deleteRecord) {
 }
 
 func diffIDFromDescriptor(desc ocispecs.Descriptor) (digest.Digest, error) {
-	diffIDStr, ok := desc.Annotations["containerd.io/uncompressed"]
+	diffIDStr, ok := desc.Annotations[labels.LabelUncompressed]
 	if !ok {
 		return "", errors.Errorf("missing uncompressed annotation for %s", desc.Digest)
 	}

+ 54 - 25
vendor/github.com/moby/buildkit/cache/refs.go

@@ -12,6 +12,7 @@ import (
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/labels"
 	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/pkg/userns"
@@ -39,7 +40,7 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-var additionalAnnotations = append(compression.EStargzAnnotations, containerdUncompressed)
+var additionalAnnotations = append(compression.EStargzAnnotations, labels.LabelUncompressed)
 
 // Ref is a reference to cacheable objects.
 type Ref interface {
@@ -443,7 +444,7 @@ func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) (rerr er
 			"id":             cr.ID(),
 			"refCount":       len(cr.refs),
 			"removeSnapshot": removeSnapshot,
-			"stack":          bklog.LazyStackTrace{},
+			"stack":          bklog.TraceLevelOnlyStack(),
 		})
 		if rerr != nil {
 			l = l.WithError(rerr)
@@ -487,7 +488,7 @@ func (sr *immutableRef) traceLogFields() logrus.Fields {
 		"refID":       fmt.Sprintf("%p", sr),
 		"newRefCount": len(sr.refs),
 		"mutable":     false,
-		"stack":       bklog.LazyStackTrace{},
+		"stack":       bklog.TraceLevelOnlyStack(),
 	}
 	if sr.equalMutable != nil {
 		m["equalMutableID"] = sr.equalMutable.ID()
@@ -627,7 +628,7 @@ func (sr *mutableRef) traceLogFields() logrus.Fields {
 		"refID":       fmt.Sprintf("%p", sr),
 		"newRefCount": len(sr.refs),
 		"mutable":     true,
-		"stack":       bklog.LazyStackTrace{},
+		"stack":       bklog.TraceLevelOnlyStack(),
 	}
 	if sr.equalMutable != nil {
 		m["equalMutableID"] = sr.equalMutable.ID()
@@ -733,7 +734,7 @@ func (sr *immutableRef) ociDesc(ctx context.Context, dhs DescHandlers, preferNon
 
 	diffID := sr.getDiffID()
 	if diffID != "" {
-		desc.Annotations["containerd.io/uncompressed"] = string(diffID)
+		desc.Annotations[labels.LabelUncompressed] = string(diffID)
 	}
 
 	createdAt := sr.GetCreatedAt()
@@ -991,6 +992,14 @@ func (sr *immutableRef) Mount(ctx context.Context, readonly bool, s session.Grou
 	return mnt, nil
 }
 
+func (sr *immutableRef) ensureLocalContentBlob(ctx context.Context, s session.Group) error {
+	if (sr.kind() == Layer || sr.kind() == BaseLayer) && !sr.getBlobOnly() {
+		return nil
+	}
+
+	return sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true, true)
+}
+
 func (sr *immutableRef) Extract(ctx context.Context, s session.Group) (rerr error) {
 	if (sr.kind() == Layer || sr.kind() == BaseLayer) && !sr.getBlobOnly() {
 		return nil
@@ -1001,14 +1010,14 @@ func (sr *immutableRef) Extract(ctx context.Context, s session.Group) (rerr erro
 			if rerr = sr.prepareRemoteSnapshotsStargzMode(ctx, s); rerr != nil {
 				return
 			}
-			rerr = sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true)
+			rerr = sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true, false)
 		}); err != nil {
 			return err
 		}
 		return rerr
 	}
 
-	return sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true)
+	return sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true, false)
 }
 
 func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, s session.Group, f func()) error {
@@ -1053,7 +1062,7 @@ func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context,
 }
 
 func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s session.Group) error {
-	_, err := g.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ struct{}, rerr error) {
+	_, err := g.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ *leaseutil.LeaseRef, rerr error) {
 		dhs := sr.descHandlers
 		for _, r := range sr.layerChain() {
 			r := r
@@ -1065,7 +1074,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s
 			dh := dhs[digest.Digest(r.getBlob())]
 			if dh == nil {
 				// We cannot prepare remote snapshots without descHandler.
-				return struct{}{}, nil
+				return nil, nil
 			}
 
 			// tmpLabels contains dh.SnapshotLabels + session IDs. All keys contain
@@ -1098,8 +1107,17 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s
 					if err == nil { // usable as remote snapshot without unlazying.
 						defer func() {
 							// Remove tmp labels appended in this func
-							for k := range tmpLabels {
-								info.Labels[k] = ""
+							if info.Labels != nil {
+								for k := range tmpLabels {
+									info.Labels[k] = ""
+								}
+							} else {
+								// We log here to help track down when and why labels are nil.
+								// This log can be removed once the issue no longer occurs.
+								bklog.G(ctx).
+									WithField("snapshotID", snapshotID).
+									WithField("name", info.Name).
+									Debug("snapshots exist but labels are nil")
 							}
 							if _, err := r.cm.Snapshotter.Update(ctx, info, tmpFields...); err != nil {
 								bklog.G(ctx).Warn(errors.Wrapf(err,
@@ -1117,7 +1135,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s
 			break
 		}
 
-		return struct{}{}, nil
+		return nil, nil
 	})
 	return err
 }
@@ -1139,25 +1157,36 @@ func makeTmpLabelsStargzMode(labels map[string]string, s session.Group) (fields
 	return
 }
 
-func (sr *immutableRef) unlazy(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool) error {
-	_, err := g.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ struct{}, rerr error) {
+func (sr *immutableRef) unlazy(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool, ensureContentStore bool) error {
+	_, err := g.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ *leaseutil.LeaseRef, rerr error) {
 		if _, err := sr.cm.Snapshotter.Stat(ctx, sr.getSnapshotID()); err == nil {
-			return struct{}{}, nil
+			if !ensureContentStore {
+				return nil, nil
+			}
+			if blob := sr.getBlob(); blob == "" {
+				return nil, nil
+			}
+			if _, err := sr.cm.ContentStore.Info(ctx, sr.getBlob()); err == nil {
+				return nil, nil
+			}
 		}
 
 		switch sr.kind() {
 		case Merge, Diff:
-			return struct{}{}, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel)
+			return nil, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel, ensureContentStore)
 		case Layer, BaseLayer:
-			return struct{}{}, sr.unlazyLayer(ctx, dhs, pg, s)
+			return nil, sr.unlazyLayer(ctx, dhs, pg, s, ensureContentStore)
 		}
-		return struct{}{}, nil
+		return nil, nil
 	})
-	return err
+	if err != nil {
+		return err
+	}
+	return nil
 }
 
 // should be called within sizeG.Do call for this ref's ID
-func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool) (rerr error) {
+func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool, ensureContentStore bool) (rerr error) {
 	eg, egctx := errgroup.WithContext(ctx)
 	var diffs []snapshot.Diff
 	sr.layerWalk(func(sr *immutableRef) {
@@ -1167,13 +1196,13 @@ func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, p
 			if sr.diffParents.lower != nil {
 				diff.Lower = sr.diffParents.lower.getSnapshotID()
 				eg.Go(func() error {
-					return sr.diffParents.lower.unlazy(egctx, dhs, pg, s, false)
+					return sr.diffParents.lower.unlazy(egctx, dhs, pg, s, false, ensureContentStore)
 				})
 			}
 			if sr.diffParents.upper != nil {
 				diff.Upper = sr.diffParents.upper.getSnapshotID()
 				eg.Go(func() error {
-					return sr.diffParents.upper.unlazy(egctx, dhs, pg, s, false)
+					return sr.diffParents.upper.unlazy(egctx, dhs, pg, s, false, ensureContentStore)
 				})
 			}
 		case Layer:
@@ -1182,7 +1211,7 @@ func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, p
 		case BaseLayer:
 			diff.Upper = sr.getSnapshotID()
 			eg.Go(func() error {
-				return sr.unlazy(egctx, dhs, pg, s, false)
+				return sr.unlazy(egctx, dhs, pg, s, false, ensureContentStore)
 			})
 		}
 		diffs = append(diffs, diff)
@@ -1213,7 +1242,7 @@ func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, p
 }
 
 // should be called within sizeG.Do call for this ref's ID
-func (sr *immutableRef) unlazyLayer(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group) (rerr error) {
+func (sr *immutableRef) unlazyLayer(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, ensureContentStore bool) (rerr error) {
 	if !sr.getBlobOnly() {
 		return nil
 	}
@@ -1240,7 +1269,7 @@ func (sr *immutableRef) unlazyLayer(ctx context.Context, dhs DescHandlers, pg pr
 	parentID := ""
 	if sr.layerParent != nil {
 		eg.Go(func() error {
-			if err := sr.layerParent.unlazy(egctx, dhs, pg, s, false); err != nil {
+			if err := sr.layerParent.unlazy(egctx, dhs, pg, s, false, ensureContentStore); err != nil {
 				return err
 			}
 			parentID = sr.layerParent.getSnapshotID()

+ 1 - 1
vendor/github.com/moby/buildkit/cache/remote.go

@@ -212,7 +212,7 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refC
 			}
 		}
 
-		if needsForceCompression(ctx, sr.cm.ContentStore, desc, refCfg) {
+		if refCfg.Compression.Force {
 			if needs, err := refCfg.Compression.Type.NeedsConversion(ctx, sr.cm.ContentStore, desc); err != nil {
 				return nil, err
 			} else if needs {

+ 6 - 0
vendor/github.com/moby/buildkit/cache/remotecache/export.go

@@ -11,6 +11,7 @@ import (
 	v1 "github.com/moby/buildkit/cache/remotecache/v1"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/compression"
 	"github.com/moby/buildkit/util/contentutil"
 	"github.com/moby/buildkit/util/progress"
@@ -185,6 +186,11 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
 		return nil, err
 	}
 
+	if len(config.Layers) == 0 {
+		bklog.G(ctx).Warn("failed to match any cache with layers")
+		return nil, progress.OneOff(ctx, "skipping cache export for empty result")(nil)
+	}
+
 	cache, err := NewExportableCache(ce.oci, ce.imageManifest)
 	if err != nil {
 		return nil, err

+ 3 - 2
vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go

@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/labels"
 	"github.com/moby/buildkit/cache/remotecache"
 	v1 "github.com/moby/buildkit/cache/remotecache/v1"
 	"github.com/moby/buildkit/session"
@@ -133,7 +134,7 @@ func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) {
 			return nil, errors.Errorf("invalid descriptor without annotations")
 		}
 		var diffID digest.Digest
-		v, ok := dgstPair.Descriptor.Annotations["containerd.io/uncompressed"]
+		v, ok := dgstPair.Descriptor.Annotations[labels.LabelUncompressed]
 		if !ok {
 			return nil, errors.Errorf("invalid descriptor without uncompressed annotation")
 		}
@@ -226,7 +227,7 @@ func (ci *importer) makeDescriptorProviderPair(l v1.CacheLayer) (*v1.DescriptorP
 	if l.Annotations.DiffID == "" {
 		return nil, errors.Errorf("cache layer with missing diffid")
 	}
-	annotations["containerd.io/uncompressed"] = l.Annotations.DiffID.String()
+	annotations[labels.LabelUncompressed] = l.Annotations.DiffID.String()
 	if !l.Annotations.CreatedAt.IsZero() {
 		txt, err := l.Annotations.CreatedAt.MarshalText()
 		if err != nil {

+ 2 - 1
vendor/github.com/moby/buildkit/cache/remotecache/import.go

@@ -10,6 +10,7 @@ import (
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/labels"
 	v1 "github.com/moby/buildkit/cache/remotecache/v1"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
@@ -221,7 +222,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
 					if createdBy := createdMsg[i]; createdBy != "" {
 						m.Annotations["buildkit/description"] = createdBy
 					}
-					m.Annotations["containerd.io/uncompressed"] = img.Rootfs.DiffIDs[i].String()
+					m.Annotations[labels.LabelUncompressed] = img.Rootfs.DiffIDs[i].String()
 					layers[m.Digest] = v1.DescriptorProviderPair{
 						Descriptor: m,
 						Provider:   ci.provider,

+ 3 - 2
vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go

@@ -1,9 +1,10 @@
-package registry
+package inline
 
 import (
 	"context"
 	"encoding/json"
 
+	"github.com/containerd/containerd/labels"
 	"github.com/moby/buildkit/cache/remotecache"
 	v1 "github.com/moby/buildkit/cache/remotecache/v1"
 	"github.com/moby/buildkit/session"
@@ -67,7 +68,7 @@ func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest)
 		}
 		// fallback for uncompressed digests
 		for _, v := range descs {
-			if uc := v.Descriptor.Annotations["containerd.io/uncompressed"]; uc == string(k) {
+			if uc := v.Descriptor.Annotations[labels.LabelUncompressed]; uc == string(k) {
 				descs2[v.Descriptor.Digest] = v
 				layerBlobDigests[i] = v.Descriptor.Digest
 			}

+ 3 - 2
vendor/github.com/moby/buildkit/cache/remotecache/local/local.go

@@ -105,8 +105,9 @@ func getContentStore(ctx context.Context, sm *session.Manager, g session.Group,
 	if sessionID == "" {
 		return nil, errors.New("local cache exporter/importer requires session")
 	}
-	timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
-	defer cancel()
+	timeoutCtx, cancel := context.WithCancelCause(context.Background())
+	timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 5*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer cancel(errors.WithStack(context.Canceled))
 
 	caller, err := sm.Get(timeoutCtx, sessionID, false)
 	if err != nil {

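Several call sites in this vendor bump also switch from ctx.Err() to context.Cause(ctx), pairing with the WithCancelCause/WithTimeoutCause pattern above. A stand-alone sketch of the pattern, where doWork is a hypothetical placeholder:

```go
package example

import (
	"context"
	"time"

	"github.com/pkg/errors"
)

func doWork(ctx context.Context) error { // hypothetical stand-in for sm.Get etc.
	select {
	case <-time.After(10 * time.Second):
		return nil
	case <-ctx.Done():
		// Unlike ctx.Err(), context.Cause returns the error passed to
		// cancel/WithTimeoutCause, so timeouts carry a stack annotation.
		return context.Cause(ctx)
	}
}

func run() error {
	ctx, cancel := context.WithCancelCause(context.Background())
	// The timeout's own cancel func is intentionally dropped, matching the diff;
	// cancelling the parent below releases the child context as well.
	ctx, _ = context.WithTimeoutCause(ctx, 5*time.Second, errors.WithStack(context.DeadlineExceeded))
	defer cancel(errors.WithStack(context.Canceled))

	return doWork(ctx)
}
```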
+ 1 - 1
vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go

@@ -7,7 +7,7 @@ import (
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/containerd/containerd/snapshots"
-	"github.com/docker/distribution/reference"
+	"github.com/distribution/reference"
 	"github.com/moby/buildkit/cache/remotecache"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/util/compression"

+ 73 - 24
vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go

@@ -21,21 +21,26 @@ type CacheChains struct {
 	visited map[interface{}]struct{}
 }
 
+var _ solver.CacheExporterTarget = &CacheChains{}
+
 func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord {
 	if strings.HasPrefix(dgst.String(), "random:") {
+		// random digests will be different *every* run - so we shouldn't cache
+		// it, since there's a zero chance this random digest collides again
 		return &nopRecord{}
 	}
-	it := &item{c: c, dgst: dgst, backlinks: map[*item]struct{}{}}
+
+	it := &item{dgst: dgst, backlinks: map[*item]struct{}{}}
 	c.items = append(c.items, it)
 	return it
 }
 
-func (c *CacheChains) Visit(v interface{}) {
-	c.visited[v] = struct{}{}
+func (c *CacheChains) Visit(target any) {
+	c.visited[target] = struct{}{}
 }
 
-func (c *CacheChains) Visited(v interface{}) bool {
-	_, ok := c.visited[v]
+func (c *CacheChains) Visited(target any) bool {
+	_, ok := c.visited[target]
 	return ok
 }
 
@@ -76,6 +81,12 @@ func (c *CacheChains) normalize(ctx context.Context) error {
 	return nil
 }
 
+// Marshal converts the cache chains structure into a cache config and a
+// collection of providers for reading the results from.
+//
+// Marshal aims to validate, normalize and sort the output to ensure a
+// consistent digest (since cache configs are typically uploaded and stored in
+// content-addressable OCI registries).
 func (c *CacheChains) Marshal(ctx context.Context) (*CacheConfig, DescriptorProvider, error) {
 	if err := c.normalize(ctx); err != nil {
 		return nil, nil, err
@@ -109,19 +120,37 @@ type DescriptorProviderPair struct {
 	Provider   content.Provider
 }
 
+// item is an implementation of a record in the cache chain. After validation,
+// normalization and marshalling into the cache config, the item results form
+// into the "layers", while the digests and the links form into the "records".
 type item struct {
-	c    *CacheChains
+	// dgst is the unique identifier for each record.
+	// This *roughly* corresponds to an edge (vertex cachekey + index) in the
+	// solver - however, a single vertex can produce multiple unique cache keys
+	// (e.g. fast/slow), so it's a one-to-many relation.
 	dgst digest.Digest
 
+	// links are what connect records to each other (with an optional selector),
+	// organized by input index (which correspond to vertex inputs).
+	// We can have multiple links for each index, since *any* of these could be
+	// used to get to this item (e.g. we could retrieve by fast/slow key).
+	links []map[link]struct{}
+
+	// backlinks are the inverse of a link - these don't actually get directly
+	// exported, but they're internally used to help efficiently navigate the
+	// graph.
+	backlinks   map[*item]struct{}
+	backlinksMu sync.Mutex
+
+	// result is the result of computing the edge - this is the target of the
+	// data we actually want to store in the cache chain.
 	result     *solver.Remote
 	resultTime time.Time
 
-	links       []map[link]struct{}
-	backlinksMu sync.Mutex
-	backlinks   map[*item]struct{}
-	invalid     bool
+	invalid bool
 }
 
+// link is a pointer to an item, with an optional selector.
 type link struct {
 	src      *item
 	selector string
@@ -170,25 +199,46 @@ func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector stri
 	src.backlinksMu.Unlock()
 }
 
+// validate checks if an item is valid (i.e. each index has at least one link)
+// and marks it as such.
+//
+// Essentially, if an index has no links, it means that this cache record is
+// unreachable by the cache importer, so we should remove it. Once we've marked
+// an item as invalid, we remove it from its backlinks and re-check their
+// validity - since those linked items may now be unreachable too.
 func (c *item) validate() {
+	if c.invalid {
+		// early exit, if the item is already invalid, we've already gone
+		// through the backlinks
+		return
+	}
+
 	for _, m := range c.links {
+		// if an index has no links, there's no way to access this record, so
+		// mark it as invalid
 		if len(m) == 0 {
 			c.invalid = true
-			for bl := range c.backlinks {
-				changed := false
-				for _, m := range bl.links {
-					for l := range m {
-						if l.src == c {
-							delete(m, l)
-							changed = true
-						}
+			break
+		}
+	}
+
+	if c.invalid {
+		for bl := range c.backlinks {
+			// remove ourselves from the backlinked item
+			changed := false
+			for _, m := range bl.links {
+				for l := range m {
+					if l.src == c {
+						delete(m, l)
+						changed = true
 					}
 				}
-				if changed {
-					bl.validate()
-				}
 			}
-			return
+
+			// if we've removed ourselves, we need to check it again
+			if changed {
+				bl.validate()
+			}
 		}
 	}
 }
@@ -211,6 +261,7 @@ func (c *item) walkAllResults(fn func(i *item) error, visited map[*item]struct{}
 	return nil
 }
 
+// nopRecord is used to discard cache results that we're not interested in storing.
 type nopRecord struct {
 }
 
@@ -219,5 +270,3 @@ func (c *nopRecord) AddResult(_ digest.Digest, _ int, createdAt time.Time, resul
 
 func (c *nopRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
 }
-
-var _ solver.CacheExporterTarget = &CacheChains{}

+ 0 - 1
vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go

@@ -30,7 +30,6 @@ package cacheimport
 //    },
 //    {
 //      "digest": "sha256:deadbeef",
-//      "output": 1,                   <- optional output index
 //      "layers": [                    <- optional array of layer pointers
 //        {
 //          "createdat": "",

+ 13 - 3
vendor/github.com/moby/buildkit/cache/util/fsutil.go

@@ -90,17 +90,17 @@ type ReadDirRequest struct {
 func ReadDir(ctx context.Context, mount snapshot.Mountable, req ReadDirRequest) ([]*fstypes.Stat, error) {
 	var (
 		rd []*fstypes.Stat
-		wo fsutil.WalkOpt
+		fo fsutil.FilterOpt
 	)
 	if req.IncludePattern != "" {
-		wo.IncludePatterns = append(wo.IncludePatterns, req.IncludePattern)
+		fo.IncludePatterns = append(fo.IncludePatterns, req.IncludePattern)
 	}
 	err := withMount(ctx, mount, func(root string) error {
 		fp, err := fs.RootPath(root, req.Path)
 		if err != nil {
 			return errors.WithStack(err)
 		}
-		return fsutil.Walk(ctx, fp, &wo, func(path string, info os.FileInfo, err error) error {
+		return fsutil.Walk(ctx, fp, &fo, func(path string, info os.FileInfo, err error) error {
 			if err != nil {
 				return errors.Wrapf(err, "walking %q", root)
 			}
@@ -128,6 +128,16 @@ func StatFile(ctx context.Context, mount snapshot.Mountable, path string) (*fsty
 			return errors.WithStack(err)
 		}
 		if st, err = fsutil.Stat(fp); err != nil {
+			// The filename here is internal to the mount, so we can restore
+			// the request base path for error reporting.
+			// See os.DirFS.Open for details.
+			err1 := err
+			if err := errors.Cause(err); err != nil {
+				err1 = err
+			}
+			if pe, ok := err1.(*os.PathError); ok {
+				pe.Path = path
+			}
 			return errors.WithStack(err)
 		}
 		return nil

+ 5 - 0
vendor/github.com/moby/buildkit/client/build.go

@@ -84,6 +84,11 @@ func (g *gatewayClientForBuild) ResolveImageConfig(ctx context.Context, in *gate
 	return g.gateway.ResolveImageConfig(ctx, in, opts...)
 }
 
+func (g *gatewayClientForBuild) ResolveSourceMeta(ctx context.Context, in *gatewayapi.ResolveSourceMetaRequest, opts ...grpc.CallOption) (*gatewayapi.ResolveSourceMetaResponse, error) {
+	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
+	return g.gateway.ResolveSourceMeta(ctx, in, opts...)
+}
+
 func (g *gatewayClientForBuild) Solve(ctx context.Context, in *gatewayapi.SolveRequest, opts ...grpc.CallOption) (*gatewayapi.SolveResponse, error) {
 	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
 	return g.gateway.Solve(ctx, in, opts...)

+ 3 - 14
vendor/github.com/moby/buildkit/client/client.go

@@ -59,9 +59,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 	var creds *withCredentials
 
 	for _, o := range opts {
-		if _, ok := o.(*withFailFast); ok {
-			gopts = append(gopts, grpc.FailOnNonTempDialError(true))
-		}
 		if credInfo, ok := o.(*withCredentials); ok {
 			if creds == nil {
 				creds = &withCredentials{}
@@ -105,8 +102,8 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 
 	if tracerProvider != nil {
 		var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})
-		unary = append(unary, filterInterceptor(otelgrpc.UnaryClientInterceptor(otelgrpc.WithTracerProvider(tracerProvider), otelgrpc.WithPropagators(propagators))))
-		stream = append(stream, otelgrpc.StreamClientInterceptor(otelgrpc.WithTracerProvider(tracerProvider), otelgrpc.WithPropagators(propagators)))
+		unary = append(unary, filterInterceptor(otelgrpc.UnaryClientInterceptor(otelgrpc.WithTracerProvider(tracerProvider), otelgrpc.WithPropagators(propagators)))) //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/buildkit/issues/4681
+		stream = append(stream, otelgrpc.StreamClientInterceptor(otelgrpc.WithTracerProvider(tracerProvider), otelgrpc.WithPropagators(propagators)))                 //nolint:staticcheck // TODO(thaJeztah): ignore SA1019 for deprecated options: see https://github.com/moby/buildkit/issues/4681
 	}
 
 	if needDialer {
@@ -205,7 +202,7 @@ func (c *Client) Wait(ctx context.Context) error {
 
 		select {
 		case <-ctx.Done():
-			return ctx.Err()
+			return context.Cause(ctx)
 		case <-time.After(time.Second):
 		}
 		c.conn.ResetConnectBackoff()
@@ -216,14 +213,6 @@ func (c *Client) Close() error {
 	return c.conn.Close()
 }
 
-type withFailFast struct{}
-
-func (*withFailFast) isClientOpt() {}
-
-func WithFailFast() ClientOpt {
-	return &withFailFast{}
-}
-
 type withDialer struct {
 	dialer func(context.Context, string) (net.Conn, error)
 }

+ 32 - 31
vendor/github.com/moby/buildkit/client/graph.go

@@ -8,49 +8,50 @@ import (
 )
 
 type Vertex struct {
-	Digest        digest.Digest
-	Inputs        []digest.Digest
-	Name          string
-	Started       *time.Time
-	Completed     *time.Time
-	Cached        bool
-	Error         string
-	ProgressGroup *pb.ProgressGroup
+	Digest        digest.Digest     `json:"digest,omitempty"`
+	Inputs        []digest.Digest   `json:"inputs,omitempty"`
+	Name          string            `json:"name,omitempty"`
+	Started       *time.Time        `json:"started,omitempty"`
+	Completed     *time.Time        `json:"completed,omitempty"`
+	Cached        bool              `json:"cached,omitempty"`
+	Error         string            `json:"error,omitempty"`
+	ProgressGroup *pb.ProgressGroup `json:"progressGroup,omitempty"`
 }
 
 type VertexStatus struct {
-	ID        string
-	Vertex    digest.Digest
-	Name      string
-	Total     int64
-	Current   int64
-	Timestamp time.Time
-	Started   *time.Time
-	Completed *time.Time
+	ID        string        `json:"id"`
+	Vertex    digest.Digest `json:"vertex,omitempty"`
+	Name      string        `json:"name,omitempty"`
+	Total     int64         `json:"total,omitempty"`
+	Current   int64         `json:"current"`
+	Timestamp time.Time     `json:"timestamp,omitempty"`
+	Started   *time.Time    `json:"started,omitempty"`
+	Completed *time.Time    `json:"completed,omitempty"`
 }
 
 type VertexLog struct {
-	Vertex    digest.Digest
-	Stream    int
-	Data      []byte
-	Timestamp time.Time
+	Vertex    digest.Digest `json:"vertex,omitempty"`
+	Stream    int           `json:"stream,omitempty"`
+	Data      []byte        `json:"data"`
+	Timestamp time.Time     `json:"timestamp"`
 }
 
 type VertexWarning struct {
-	Vertex     digest.Digest
-	Level      int
-	Short      []byte
-	Detail     [][]byte
-	URL        string
-	SourceInfo *pb.SourceInfo
-	Range      []*pb.Range
+	Vertex digest.Digest `json:"vertex,omitempty"`
+	Level  int           `json:"level,omitempty"`
+	Short  []byte        `json:"short,omitempty"`
+	Detail [][]byte      `json:"detail,omitempty"`
+	URL    string        `json:"url,omitempty"`
+
+	SourceInfo *pb.SourceInfo `json:"sourceInfo,omitempty"`
+	Range      []*pb.Range    `json:"range,omitempty"`
 }
 
 type SolveStatus struct {
-	Vertexes []*Vertex
-	Statuses []*VertexStatus
-	Logs     []*VertexLog
-	Warnings []*VertexWarning
+	Vertexes []*Vertex        `json:"vertexes,omitempty"`
+	Statuses []*VertexStatus  `json:"statuses,omitempty"`
+	Logs     []*VertexLog     `json:"logs,omitempty"`
+	Warnings []*VertexWarning `json:"warnings,omitempty"`
 }
 
 type SolveResponse struct {
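
The JSON struct tags added above give these progress types a stable wire format. A small sketch of what that enables; field names in the output follow the new lowercase tags:

```go
// Marshal a status update; empty fields are dropped via omitempty.
package example

import (
	"encoding/json"
	"fmt"

	"github.com/moby/buildkit/client"
)

func printStatus(st *client.SolveStatus) error {
	dt, err := json.Marshal(st)
	if err != nil {
		return err
	}
	fmt.Println(string(dt)) // e.g. {"vertexes":[{"digest":"sha256:...","name":"build"}]}
	return nil
}
```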

+ 1 - 1
vendor/github.com/moby/buildkit/client/llb/async.go

@@ -61,7 +61,7 @@ func (as *asyncState) Do(ctx context.Context, c *Constraints) error {
 		if err != nil {
 			select {
 			case <-ctx.Done():
-				if errors.Is(err, ctx.Err()) {
+				if errors.Is(err, context.Cause(ctx)) {
 					return res, err
 				}
 			default:

+ 26 - 0
vendor/github.com/moby/buildkit/client/llb/exec.go

@@ -46,6 +46,7 @@ type mount struct {
 	tmpfsOpt     TmpfsInfo
 	cacheSharing CacheMountSharingMode
 	noOutput     bool
+	contentCache MountContentCache
 }
 
 type ExecOp struct {
@@ -281,6 +282,9 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
 		} else if m.source != nil {
 			addCap(&e.constraints, pb.CapExecMountBind)
 		}
+		if m.contentCache != MountContentCacheDefault {
+			addCap(&e.constraints, pb.CapExecMountContentCache)
+		}
 	}
 
 	if len(e.secrets) > 0 {
@@ -366,6 +370,14 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
 				pm.CacheOpt.Sharing = pb.CacheSharingOpt_LOCKED
 			}
 		}
+		switch m.contentCache {
+		case MountContentCacheDefault:
+			pm.ContentCache = pb.MountContentCache_DEFAULT
+		case MountContentCacheOn:
+			pm.ContentCache = pb.MountContentCache_ON
+		case MountContentCacheOff:
+			pm.ContentCache = pb.MountContentCache_OFF
+		}
 		if m.tmpfs {
 			pm.MountType = pb.MountType_TMPFS
 			pm.TmpfsOpt = &pb.TmpfsOpt{
@@ -492,6 +504,12 @@ func ForceNoOutput(m *mount) {
 	m.noOutput = true
 }
 
+func ContentCache(cache MountContentCache) MountOption {
+	return func(m *mount) {
+		m.contentCache = cache
+	}
+}
+
 func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption {
 	return func(m *mount) {
 		m.cacheID = id
@@ -783,3 +801,11 @@ const (
 	UlimitSigpending UlimitName = "sigpending"
 	UlimitStack      UlimitName = "stack"
 )
+
+type MountContentCache int
+
+const (
+	MountContentCacheDefault MountContentCache = iota
+	MountContentCacheOn
+	MountContentCacheOff
+)
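
`ContentCache` is a new `MountOption`, gated on the `CapExecMountContentCache` capability added above. A hedged sketch of a client opting a cache mount into content-based caching (the mount path, cache ID, and command are illustrative; the other options shown are pre-existing llb helpers):

```go
// Request content-based caching for a persistent cache mount.
package example

import "github.com/moby/buildkit/client/llb"

func goBuild(src llb.State) llb.State {
	return src.Run(
		llb.Shlex("go build ./..."),
		llb.AddMount("/root/.cache/go-build", llb.Scratch(),
			llb.AsPersistentCacheDir("go-build-cache", llb.CacheMountShared),
			llb.ContentCache(llb.MountContentCacheOn),
		),
	).Root()
}
```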

+ 12 - 0
vendor/github.com/moby/buildkit/client/llb/fileop.go

@@ -398,6 +398,18 @@ func WithAllowWildcard(b bool) RmOption {
 	})
 }
 
+type excludeOnCopyAction struct {
+	patterns []string
+}
+
+func (e *excludeOnCopyAction) SetCopyOption(i *CopyInfo) {
+	i.ExcludePatterns = append(i.ExcludePatterns, e.patterns...)
+}
+
+func WithExcludePatterns(patterns []string) CopyOption {
+	return &excludeOnCopyAction{patterns}
+}
+
 type fileActionRm struct {
 	file string
 	info RmInfo
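
`WithExcludePatterns` joins the existing `CopyOption` set. A short sketch (paths and patterns are illustrative):

```go
// Copy a tree while skipping anything matching the exclude patterns.
package example

import "github.com/moby/buildkit/client/llb"

func copySources(src llb.State) llb.State {
	return llb.Scratch().File(
		llb.Copy(src, "/src", "/out",
			llb.WithExcludePatterns([]string{"**/*_test.go", "vendor/**"}),
		),
	)
}
```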

+ 8 - 8
vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go

@@ -9,6 +9,7 @@ import (
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/moby/buildkit/client/llb"
+	"github.com/moby/buildkit/client/llb/sourceresolver"
 	"github.com/moby/buildkit/util/contentutil"
 	"github.com/moby/buildkit/util/imageutil"
 	"github.com/moby/buildkit/version"
@@ -70,32 +71,31 @@ type imageMetaResolver struct {
 }
 
 type resolveResult struct {
-	ref    string
 	config []byte
 	dgst   digest.Digest
 }
 
-func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) {
+func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt) (string, digest.Digest, []byte, error) {
 	imr.locker.Lock(ref)
 	defer imr.locker.Unlock(ref)
 
-	platform := opt.Platform
-	if platform == nil {
-		platform = imr.platform
+	platform := imr.platform
+	if opt.Platform != nil {
+		platform = opt.Platform
 	}
 
 	k := imr.key(ref, platform)
 
 	if res, ok := imr.cache[k]; ok {
-		return res.ref, res.dgst, res.config, nil
+		return ref, res.dgst, res.config, nil
 	}
 
-	ref, dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform, opt.SourcePolicies)
+	dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform)
 	if err != nil {
 		return "", "", nil, err
 	}
 
-	imr.cache[k] = resolveResult{dgst: dgst, config: config, ref: ref}
+	imr.cache[k] = resolveResult{dgst: dgst, config: config}
 	return ref, dgst, config, nil
 }
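
Callers normally reach this resolver through the existing `WithMetaResolver` image option; `imagemetaresolver.Default()` (not changed by this diff) returns a registry-backed instance with default settings. A sketch:

```go
// Resolve image metadata at LLB marshal time so llb.Image can apply the
// image config (env, entrypoint, platform) to the resulting state.
package example

import (
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/client/llb/imagemetaresolver"
)

func alpine() llb.State {
	return llb.Image("docker.io/library/alpine:latest",
		llb.WithMetaResolver(imagemetaresolver.Default()),
	)
}
```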
 

+ 11 - 7
vendor/github.com/moby/buildkit/client/llb/marshal.go

@@ -95,14 +95,18 @@ func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) {
 		c.Platform = &defaultPlatform
 	}
 
+	opPlatform := pb.Platform{
+		OS:           c.Platform.OS,
+		Architecture: c.Platform.Architecture,
+		Variant:      c.Platform.Variant,
+		OSVersion:    c.Platform.OSVersion,
+	}
+	if c.Platform.OSFeatures != nil {
+		opPlatform.OSFeatures = append([]string{}, c.Platform.OSFeatures...)
+	}
+
 	return &pb.Op{
-		Platform: &pb.Platform{
-			OS:           c.Platform.OS,
-			Architecture: c.Platform.Architecture,
-			Variant:      c.Platform.Variant,
-			OSVersion:    c.Platform.OSVersion,
-			OSFeatures:   c.Platform.OSFeatures,
-		},
+		Platform: &opPlatform,
 		Constraints: &pb.WorkerConstraints{
 			Filter: c.WorkerConstraints,
 		},

+ 2 - 32
vendor/github.com/moby/buildkit/client/llb/resolver.go

@@ -1,11 +1,7 @@
 package llb
 
 import (
-	"context"
-
-	spb "github.com/moby/buildkit/sourcepolicy/pb"
-	digest "github.com/opencontainers/go-digest"
-	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/moby/buildkit/client/llb/sourceresolver"
 )
 
 // WithMetaResolver adds a metadata resolver to an image
@@ -31,30 +27,4 @@ func WithLayerLimit(l int) ImageOption {
 }
 
 // ImageMetaResolver can resolve image config metadata from a reference
-type ImageMetaResolver interface {
-	ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (string, digest.Digest, []byte, error)
-}
-
-type ResolverType int
-
-const (
-	ResolverTypeRegistry ResolverType = iota
-	ResolverTypeOCILayout
-)
-
-type ResolveImageConfigOpt struct {
-	ResolverType
-
-	Platform    *ocispecs.Platform
-	ResolveMode string
-	LogName     string
-
-	Store ResolveImageConfigOptStore
-
-	SourcePolicies []*spb.Policy
-}
-
-type ResolveImageConfigOptStore struct {
-	SessionID string
-	StoreID   string
-}
+type ImageMetaResolver = sourceresolver.ImageMetaResolver

+ 37 - 33
vendor/github.com/moby/buildkit/client/llb/source.go

@@ -5,10 +5,12 @@ import (
 	_ "crypto/sha256" // for opencontainers/go-digest
 	"encoding/json"
 	"os"
+	"path"
 	"strconv"
 	"strings"
 
-	"github.com/docker/distribution/reference"
+	"github.com/distribution/reference"
+	"github.com/moby/buildkit/client/llb/sourceresolver"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/apicaps"
 	"github.com/moby/buildkit/util/gitutil"
@@ -135,10 +137,11 @@ func Image(ref string, opts ...ImageOption) State {
 				if p == nil {
 					p = c.Platform
 				}
-				_, _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{
-					Platform:     p,
-					ResolveMode:  info.resolveMode.String(),
-					ResolverType: ResolverTypeRegistry,
+				_, _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, sourceresolver.Opt{
+					Platform: p,
+					ImageOpt: &sourceresolver.ResolveImageOpt{
+						ResolveMode: info.resolveMode.String(),
+					},
 				})
 				if err != nil {
 					return State{}, err
@@ -151,10 +154,11 @@ func Image(ref string, opts ...ImageOption) State {
 			if p == nil {
 				p = c.Platform
 			}
-			ref, dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{
-				Platform:     p,
-				ResolveMode:  info.resolveMode.String(),
-				ResolverType: ResolverTypeRegistry,
+			ref, dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, sourceresolver.Opt{
+				Platform: p,
+				ImageOpt: &sourceresolver.ResolveImageOpt{
+					ResolveMode: info.resolveMode.String(),
+				},
 			})
 			if err != nil {
 				return State{}, err
@@ -226,7 +230,7 @@ type ImageInfo struct {
 // Git returns a state that represents a git repository.
 // Example:
 //
-//	st := llb.Git("https://github.com/moby/buildkit.git#v0.11.6")
+//	st := llb.Git("https://github.com/moby/buildkit.git", "v0.11.6")
 //
 // The example fetches the v0.11.6 tag of the buildkit repository.
 // You can also use a commit hash or a branch name.
@@ -237,29 +241,29 @@ type ImageInfo struct {
 //
 // By default the git repository is cloned with `--depth=1` to reduce the amount of data downloaded.
 // Additionally the ".git" directory is removed after the clone; you can keep it with the [KeepGitDir] [GitOption].
-func Git(remote, ref string, opts ...GitOption) State {
-	url := strings.Split(remote, "#")[0]
-
-	var protocolType int
-	remote, protocolType = gitutil.ParseProtocol(remote)
-
-	var sshHost string
-	if protocolType == gitutil.SSHProtocol {
-		parts := strings.SplitN(remote, ":", 2)
-		if len(parts) == 2 {
-			sshHost = parts[0]
-			// keep remote consistent with http(s) version
-			remote = parts[0] + "/" + parts[1]
-		}
-	}
-	if protocolType == gitutil.UnknownProtocol {
+func Git(url, ref string, opts ...GitOption) State {
+	remote, err := gitutil.ParseURL(url)
+	if errors.Is(err, gitutil.ErrUnknownProtocol) {
 		url = "https://" + url
+		remote, err = gitutil.ParseURL(url)
+	}
+	if remote != nil {
+		url = remote.Remote
 	}
 
-	id := remote
-
-	if ref != "" {
-		id += "#" + ref
+	var id string
+	if err != nil {
+		// If we can't parse the URL, just use the full URL as the ID. The git
+		// operation will fail later on.
+		id = url
+	} else {
+		// We construct the ID manually here, so that we can create the same ID
+		// for different protocols (e.g. https and ssh) that have the same
+		// host/path/fragment combination.
+		id = remote.Host + path.Join("/", remote.Path)
+		if ref != "" {
+			id += "#" + ref
+		}
 	}
 
 	gi := &GitInfo{
@@ -290,11 +294,11 @@ func Git(remote, ref string, opts ...GitOption) State {
 			addCap(&gi.Constraints, pb.CapSourceGitHTTPAuth)
 		}
 	}
-	if protocolType == gitutil.SSHProtocol {
+	if remote != nil && remote.Scheme == gitutil.SSHProtocol {
 		if gi.KnownSSHHosts != "" {
 			attrs[pb.AttrKnownSSHHosts] = gi.KnownSSHHosts
-		} else if sshHost != "" {
-			keyscan, err := sshutil.SSHKeyScan(sshHost)
+		} else {
+			keyscan, err := sshutil.SSHKeyScan(remote.Host)
 			if err == nil {
 				// best effort
 				attrs[pb.AttrKnownSSHHosts] = keyscan
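
The doc comment above already shows the canonical call; a slightly fuller sketch with a `GitOption` (URL and tag are taken from that comment):

```go
// Fetch a tag from a git remote; KeepGitDir preserves the .git directory
// that is otherwise removed after the shallow clone.
package example

import "github.com/moby/buildkit/client/llb"

func buildkitAtTag() llb.State {
	return llb.Git("https://github.com/moby/buildkit.git", "v0.11.6",
		llb.KeepGitDir(),
	)
}
```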

+ 59 - 0
vendor/github.com/moby/buildkit/client/llb/sourceresolver/imageresolver.go

@@ -0,0 +1,59 @@
+package sourceresolver
+
+import (
+	"context"
+	"strings"
+
+	"github.com/distribution/reference"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/imageutil"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+type ImageMetaResolver interface {
+	ResolveImageConfig(ctx context.Context, ref string, opt Opt) (string, digest.Digest, []byte, error)
+}
+
+type imageMetaResolver struct {
+	mr MetaResolver
+}
+
+var _ ImageMetaResolver = &imageMetaResolver{}
+
+func NewImageMetaResolver(mr MetaResolver) ImageMetaResolver {
+	return &imageMetaResolver{
+		mr: mr,
+	}
+}
+
+func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt Opt) (string, digest.Digest, []byte, error) {
+	parsed, err := reference.ParseNormalizedNamed(ref)
+	if err != nil {
+		return "", "", nil, errors.Wrapf(err, "could not parse reference %q", ref)
+	}
+	ref = parsed.String()
+	op := &pb.SourceOp{
+		Identifier: "docker-image://" + ref,
+	}
+	if opt := opt.OCILayoutOpt; opt != nil {
+		op.Identifier = "oci-layout://" + ref
+		op.Attrs = map[string]string{}
+		if opt.Store.SessionID != "" {
+			op.Attrs[pb.AttrOCILayoutSessionID] = opt.Store.SessionID
+		}
+		if opt.Store.StoreID != "" {
+			op.Attrs[pb.AttrOCILayoutStoreID] = opt.Store.StoreID
+		}
+	}
+	res, err := imr.mr.ResolveSourceMetadata(ctx, op, opt)
+	if err != nil {
+		return "", "", nil, errors.Wrapf(err, "failed to resolve source metadata for %s", ref)
+	}
+	if res.Image == nil {
+		return "", "", nil, &imageutil.ResolveToNonImageError{Ref: ref, Updated: res.Op.Identifier}
+	}
+	ref = strings.TrimPrefix(res.Op.Identifier, "docker-image://")
+	ref = strings.TrimPrefix(ref, "oci-layout://")
+	return ref, res.Image.Digest, res.Image.Config, nil
+}

+ 54 - 0
vendor/github.com/moby/buildkit/client/llb/sourceresolver/types.go

@@ -0,0 +1,54 @@
+package sourceresolver
+
+import (
+	"context"
+
+	"github.com/moby/buildkit/solver/pb"
+	spb "github.com/moby/buildkit/sourcepolicy/pb"
+	digest "github.com/opencontainers/go-digest"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type ResolverType int
+
+const (
+	ResolverTypeRegistry ResolverType = iota
+	ResolverTypeOCILayout
+)
+
+type MetaResolver interface {
+	ResolveSourceMetadata(ctx context.Context, op *pb.SourceOp, opt Opt) (*MetaResponse, error)
+}
+
+type Opt struct {
+	LogName        string
+	SourcePolicies []*spb.Policy
+	Platform       *ocispecs.Platform
+
+	ImageOpt     *ResolveImageOpt
+	OCILayoutOpt *ResolveOCILayoutOpt
+}
+
+type MetaResponse struct {
+	Op *pb.SourceOp
+
+	Image *ResolveImageResponse
+}
+
+type ResolveImageOpt struct {
+	ResolveMode string
+}
+
+type ResolveImageResponse struct {
+	Digest digest.Digest
+	Config []byte
+}
+
+type ResolveOCILayoutOpt struct {
+	Store ResolveImageConfigOptStore
+}
+
+type ResolveImageConfigOptStore struct {
+	SessionID string
+	StoreID   string
+}
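
These types replace the old `ResolveImageConfigOpt` removed from `client/llb/resolver.go`. A hedged sketch of calling the new interface; `mr` is assumed to be some `MetaResolver` implementation supplied by the caller, and the platform and resolve mode are illustrative:

```go
// Resolve an image config through the sourceresolver API.
package example

import (
	"context"

	"github.com/moby/buildkit/client/llb/sourceresolver"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

func imageConfig(ctx context.Context, mr sourceresolver.MetaResolver) ([]byte, error) {
	imr := sourceresolver.NewImageMetaResolver(mr)
	_, _, config, err := imr.ResolveImageConfig(ctx, "docker.io/library/alpine:latest",
		sourceresolver.Opt{
			Platform: &ocispecs.Platform{OS: "linux", Architecture: "amd64"},
			ImageOpt: &sourceresolver.ResolveImageOpt{ResolveMode: "pull"},
		})
	if err != nil {
		return nil, err
	}
	return config, nil
}
```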

+ 9 - 4
vendor/github.com/moby/buildkit/client/llb/state.go

@@ -229,7 +229,7 @@ func (s State) Output() Output {
 	return s.out
 }
 
-// WithOutput creats a new state with the output set to the given output.
+// WithOutput creates a new state with the output set to the given output.
 func (s State) WithOutput(o Output) State {
 	prev := s
 	s = State{
@@ -258,16 +258,21 @@ func (s State) WithImageConfig(c []byte) (State, error) {
 	}
 	s = s.Dir(img.Config.WorkingDir)
 	if img.Architecture != "" && img.OS != "" {
-		s = s.Platform(ocispecs.Platform{
+		plat := ocispecs.Platform{
 			OS:           img.OS,
 			Architecture: img.Architecture,
 			Variant:      img.Variant,
-		})
+			OSVersion:    img.OSVersion,
+		}
+		if img.OSFeatures != nil {
+			plat.OSFeatures = append([]string{}, img.OSFeatures...)
+		}
+		s = s.Platform(plat)
 	}
 	return s, nil
 }
 
-// Run performs the command specified by the arguments within the contexst of the current [State].
+// Run performs the command specified by the arguments within the context of the current [State].
 // The command is executed as a container with the [State]'s filesystem as the root filesystem.
 // As such any command you run must be present in the [State]'s filesystem.
 // Constraints such as [State.Ulimit], [State.ParentCgroup], [State.Network], etc. are applied to the container.

+ 1 - 4
vendor/github.com/moby/buildkit/client/ociindex/ociindex.go

@@ -12,9 +12,6 @@ import (
 )
 
 const (
-	// indexFile is the name of the index file
-	indexFile = "index.json"
-
 	// lockFileSuffix is the suffix of the lock file
 	lockFileSuffix = ".lock"
 )
@@ -26,7 +23,7 @@ type StoreIndex struct {
 }
 
 func NewStoreIndex(storePath string) StoreIndex {
-	indexPath := path.Join(storePath, indexFile)
+	indexPath := path.Join(storePath, ocispecs.ImageIndexFile)
 	layoutPath := path.Join(storePath, ocispecs.ImageLayoutFile)
 	return StoreIndex{
 		indexPath:  indexPath,

+ 126 - 87
vendor/github.com/moby/buildkit/client/solve.go

@@ -35,7 +35,8 @@ import (
 
 type SolveOpt struct {
 	Exports               []ExportEntry
-	LocalDirs             map[string]string
+	LocalDirs             map[string]string // Deprecated: use LocalMounts
+	LocalMounts           map[string]fsutil.FS
 	OCIStores             map[string]content.Store
 	SharedKey             string
 	Frontend              string
@@ -55,8 +56,8 @@ type SolveOpt struct {
 type ExportEntry struct {
 	Type      string
 	Attrs     map[string]string
-	Output    func(map[string]string) (io.WriteCloser, error) // for ExporterOCI and ExporterDocker
-	OutputDir string                                          // for ExporterLocal
+	Output    filesync.FileOutputFunc // for ExporterOCI and ExporterDocker
+	OutputDir string                  // for ExporterLocal
 }
 
 type CacheOptionsEntry struct {
@@ -90,7 +91,11 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 		return nil, errors.New("invalid with def and cb")
 	}
 
-	syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs)
+	mounts, err := prepareMounts(&opt)
+	if err != nil {
+		return nil, err
+	}
+	syncedDirs, err := prepareSyncedFiles(def, mounts)
 	if err != nil {
 		return nil, err
 	}
@@ -101,8 +106,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 	}
 	eg, ctx := errgroup.WithContext(ctx)
 
-	statusContext, cancelStatus := context.WithCancel(context.Background())
-	defer cancelStatus()
+	statusContext, cancelStatus := context.WithCancelCause(context.Background())
+	defer cancelStatus(errors.WithStack(context.Canceled))
 
 	if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() {
 		statusContext = trace.ContextWithSpan(statusContext, span)
@@ -125,14 +130,6 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 		return nil, err
 	}
 
-	var ex ExportEntry
-	if len(opt.Exports) > 1 {
-		return nil, errors.New("currently only single Exports can be specified")
-	}
-	if len(opt.Exports) == 1 {
-		ex = opt.Exports[0]
-	}
-
 	storesToUpdate := []string{}
 
 	if !opt.SessionPreInitialized {
@@ -156,51 +153,52 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 			contentStores[key2] = store
 		}
 
-		var supportFile bool
-		var supportDir bool
-		switch ex.Type {
-		case ExporterLocal:
-			supportDir = true
-		case ExporterTar:
-			supportFile = true
-		case ExporterOCI, ExporterDocker:
-			supportDir = ex.OutputDir != ""
-			supportFile = ex.Output != nil
-		}
-
-		if supportFile && supportDir {
-			return nil, errors.Errorf("both file and directory output is not supported by %s exporter", ex.Type)
-		}
-		if !supportFile && ex.Output != nil {
-			return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
-		}
-		if !supportDir && ex.OutputDir != "" {
-			return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
-		}
-
-		if supportFile {
-			if ex.Output == nil {
-				return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
-			}
-			s.Allow(filesync.NewFSSyncTarget(ex.Output))
-		}
-		if supportDir {
-			if ex.OutputDir == "" {
-				return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
-			}
+		var syncTargets []filesync.FSSyncTarget
+		for exID, ex := range opt.Exports {
+			var supportFile bool
+			var supportDir bool
 			switch ex.Type {
+			case ExporterLocal:
+				supportDir = true
+			case ExporterTar:
+				supportFile = true
 			case ExporterOCI, ExporterDocker:
-				if err := os.MkdirAll(ex.OutputDir, 0755); err != nil {
-					return nil, err
+				supportDir = ex.OutputDir != ""
+				supportFile = ex.Output != nil
+			}
+			if supportFile && supportDir {
+				return nil, errors.Errorf("both file and directory output is not supported by %s exporter", ex.Type)
+			}
+			if !supportFile && ex.Output != nil {
+				return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
+			}
+			if !supportDir && ex.OutputDir != "" {
+				return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
+			}
+			if supportFile {
+				if ex.Output == nil {
+					return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
 				}
-				cs, err := contentlocal.NewStore(ex.OutputDir)
-				if err != nil {
-					return nil, err
+				syncTargets = append(syncTargets, filesync.WithFSSync(exID, ex.Output))
+			}
+			if supportDir {
+				if ex.OutputDir == "" {
+					return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
+				}
+				switch ex.Type {
+				case ExporterOCI, ExporterDocker:
+					if err := os.MkdirAll(ex.OutputDir, 0755); err != nil {
+						return nil, err
+					}
+					cs, err := contentlocal.NewStore(ex.OutputDir)
+					if err != nil {
+						return nil, err
+					}
+					contentStores["export"] = cs
+					storesToUpdate = append(storesToUpdate, ex.OutputDir)
+				default:
+					syncTargets = append(syncTargets, filesync.WithFSSyncDir(exID, ex.OutputDir))
 				}
-				contentStores["export"] = cs
-				storesToUpdate = append(storesToUpdate, ex.OutputDir)
-			default:
-				s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
 			}
 		}
 
@@ -208,6 +206,10 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 			s.Allow(sessioncontent.NewAttachable(contentStores))
 		}
 
+		if len(syncTargets) > 0 {
+			s.Allow(filesync.NewFSSyncTarget(syncTargets...))
+		}
+
 		eg.Go(func() error {
 			sd := c.sessionDialer
 			if sd == nil {
@@ -225,16 +227,16 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 		frontendAttrs[k] = v
 	}
 
-	solveCtx, cancelSolve := context.WithCancel(ctx)
+	solveCtx, cancelSolve := context.WithCancelCause(ctx)
 	var res *SolveResponse
 	eg.Go(func() error {
 		ctx := solveCtx
-		defer cancelSolve()
+		defer cancelSolve(errors.WithStack(context.Canceled))
 
 		defer func() { // make sure the Status ends cleanly on build errors
 			go func() {
 				<-time.After(3 * time.Second)
-				cancelStatus()
+				cancelStatus(errors.WithStack(context.Canceled))
 			}()
 			if !opt.SessionPreInitialized {
 				bklog.G(ctx).Debugf("stopping session")
@@ -255,19 +257,34 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 			frontendInputs[key] = def.ToPB()
 		}
 
+		exports := make([]*controlapi.Exporter, 0, len(opt.Exports))
+		exportDeprecated := ""
+		exportAttrDeprecated := map[string]string{}
+		for i, exp := range opt.Exports {
+			if i == 0 {
+				exportDeprecated = exp.Type
+				exportAttrDeprecated = exp.Attrs
+			}
+			exports = append(exports, &controlapi.Exporter{
+				Type:  exp.Type,
+				Attrs: exp.Attrs,
+			})
+		}
+
 		resp, err := c.ControlClient().Solve(ctx, &controlapi.SolveRequest{
-			Ref:            ref,
-			Definition:     pbd,
-			Exporter:       ex.Type,
-			ExporterAttrs:  ex.Attrs,
-			Session:        s.ID(),
-			Frontend:       opt.Frontend,
-			FrontendAttrs:  frontendAttrs,
-			FrontendInputs: frontendInputs,
-			Cache:          cacheOpt.options,
-			Entitlements:   opt.AllowedEntitlements,
-			Internal:       opt.Internal,
-			SourcePolicy:   opt.SourcePolicy,
+			Ref:                     ref,
+			Definition:              pbd,
+			Exporters:               exports,
+			ExporterDeprecated:      exportDeprecated,
+			ExporterAttrsDeprecated: exportAttrDeprecated,
+			Session:                 s.ID(),
+			Frontend:                opt.Frontend,
+			FrontendAttrs:           frontendAttrs,
+			FrontendInputs:          frontendInputs,
+			Cache:                   cacheOpt.options,
+			Entitlements:            opt.AllowedEntitlements,
+			Internal:                opt.Internal,
+			SourcePolicy:            opt.SourcePolicy,
 		})
 		if err != nil {
 			return errors.Wrap(err, "failed to solve")
@@ -293,7 +310,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 			select {
 			case <-solveCtx.Done():
 			case <-time.After(5 * time.Second):
-				cancelSolve()
+				cancelSolve(errors.WithStack(context.Canceled))
 			}
 
 			return err
@@ -361,26 +378,23 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 	return res, nil
 }
 
-func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesync.StaticDirSource, error) {
-	for _, d := range localDirs {
-		fi, err := os.Stat(d)
-		if err != nil {
-			return nil, errors.Wrapf(err, "could not find %s", d)
-		}
-		if !fi.IsDir() {
-			return nil, errors.Errorf("%s not a directory", d)
-		}
-	}
+func prepareSyncedFiles(def *llb.Definition, localMounts map[string]fsutil.FS) (filesync.StaticDirSource, error) {
 	resetUIDAndGID := func(p string, st *fstypes.Stat) fsutil.MapResult {
 		st.Uid = 0
 		st.Gid = 0
 		return fsutil.MapResultKeep
 	}
 
-	dirs := make(filesync.StaticDirSource, len(localDirs))
+	result := make(filesync.StaticDirSource, len(localMounts))
 	if def == nil {
-		for name, d := range localDirs {
-			dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
+		for name, mount := range localMounts {
+			mount, err := fsutil.NewFilterFS(mount, &fsutil.FilterOpt{
+				Map: resetUIDAndGID,
+			})
+			if err != nil {
+				return nil, err
+			}
+			result[name] = mount
 		}
 	} else {
 		for _, dt := range def.Def {
@@ -391,16 +405,22 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesy
 			if src := op.GetSource(); src != nil {
 				if strings.HasPrefix(src.Identifier, "local://") {
 					name := strings.TrimPrefix(src.Identifier, "local://")
-					d, ok := localDirs[name]
+					mount, ok := localMounts[name]
 					if !ok {
 						return nil, errors.Errorf("local directory %s not enabled", name)
 					}
-					dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
+					mount, err := fsutil.NewFilterFS(mount, &fsutil.FilterOpt{
+						Map: resetUIDAndGID,
+					})
+					if err != nil {
+						return nil, err
+					}
+					result[name] = mount
 				}
 			}
 		}
 	}
-	return dirs, nil
+	return result, nil
 }
 
 func defaultSessionName() string {
@@ -523,3 +543,22 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
 	}
 	return &res, nil
 }
+
+func prepareMounts(opt *SolveOpt) (map[string]fsutil.FS, error) {
+	// merge local mounts and fallback local directories together
+	mounts := make(map[string]fsutil.FS)
+	for k, mount := range opt.LocalMounts {
+		mounts[k] = mount
+	}
+	for k, dir := range opt.LocalDirs {
+		mount, err := fsutil.NewFS(dir)
+		if err != nil {
+			return nil, err
+		}
+		if _, ok := mounts[k]; ok {
+			return nil, errors.Errorf("local mount %s already exists", k)
+		}
+		mounts[k] = mount
+	}
+	return mounts, nil
+}
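
`prepareMounts` above is what keeps the deprecated `LocalDirs` working while callers migrate to `LocalMounts`. A sketch of the caller side, assuming a daemon new enough to accept more than one exporter per solve (directory names are illustrative):

```go
// Build a SolveOpt with the new LocalMounts field and two exporters.
package example

import (
	"github.com/moby/buildkit/client"
	"github.com/tonistiigi/fsutil"
)

func newSolveOpt(contextDir string) (*client.SolveOpt, error) {
	ctxFS, err := fsutil.NewFS(contextDir) // same helper prepareMounts uses for LocalDirs
	if err != nil {
		return nil, err
	}
	return &client.SolveOpt{
		LocalMounts: map[string]fsutil.FS{
			"context":    ctxFS,
			"dockerfile": ctxFS,
		},
		Exports: []client.ExportEntry{
			{Type: client.ExporterOCI, OutputDir: "./oci-layout"},
			{Type: client.ExporterLocal, OutputDir: "./rootfs"},
		},
	}, nil
}
```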

+ 23 - 0
vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go

@@ -14,9 +14,15 @@ type Config struct {
 
 	// Entitlements e.g. security.insecure, network.host
 	Entitlements []string `toml:"insecure-entitlements"`
+
+	// LogFormat is the format of the logs. It can be "json" or "text".
+	Log LogConfig `toml:"log"`
+
 	// GRPC configuration settings
 	GRPC GRPCConfig `toml:"grpc"`
 
+	OTEL OTELConfig `toml:"otel"`
+
 	Workers struct {
 		OCI        OCIConfig        `toml:"oci"`
 		Containerd ContainerdConfig `toml:"containerd"`
@@ -29,6 +35,10 @@ type Config struct {
 	History *HistoryConfig `toml:"history"`
 }
 
+type LogConfig struct {
+	Format string `toml:"format"`
+}
+
 type GRPCConfig struct {
 	Address      []string `toml:"address"`
 	DebugAddress string   `toml:"debugAddress"`
@@ -46,6 +56,10 @@ type TLSConfig struct {
 	CA   string `toml:"ca"`
 }
 
+type OTELConfig struct {
+	SocketPath string `toml:"socketPath"`
+}
+
 type GCConfig struct {
 	GC            *bool      `toml:"gc"`
 	GCKeepStorage DiskSpace  `toml:"gckeepstorage"`
@@ -57,6 +71,8 @@ type NetworkConfig struct {
 	CNIConfigPath string `toml:"cniConfigPath"`
 	CNIBinaryPath string `toml:"cniBinaryPath"`
 	CNIPoolSize   int    `toml:"cniPoolSize"`
+	BridgeName    string `toml:"bridgeName"`
+	BridgeSubnet  string `toml:"bridgeSubnet"`
 }
 
 type OCIConfig struct {
@@ -98,6 +114,7 @@ type ContainerdConfig struct {
 	Labels    map[string]string `toml:"labels"`
 	Platforms []string          `toml:"platforms"`
 	Namespace string            `toml:"namespace"`
+	Runtime   ContainerdRuntime `toml:"runtime"`
 	GCConfig
 	NetworkConfig
 	Snapshotter string `toml:"snapshotter"`
@@ -114,6 +131,12 @@ type ContainerdConfig struct {
 	Rootless bool `toml:"rootless"`
 }
 
+type ContainerdRuntime struct {
+	Name    string                 `toml:"name"`
+	Path    string                 `toml:"path"`
+	Options map[string]interface{} `toml:"options"`
+}
+
 type GCPolicy struct {
 	All          bool      `toml:"all"`
 	KeepBytes    DiskSpace `toml:"keepBytes"`

+ 30 - 17
vendor/github.com/moby/buildkit/control/control.go

@@ -11,7 +11,7 @@ import (
 	contentapi "github.com/containerd/containerd/api/services/content/v1"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/services/content/contentserver"
-	"github.com/docker/distribution/reference"
+	"github.com/distribution/reference"
 	"github.com/hashicorp/go-multierror"
 	"github.com/mitchellh/hashstructure/v2"
 	controlapi "github.com/moby/buildkit/api/services/control"
@@ -130,11 +130,12 @@ func (c *Controller) Close() error {
 	if err := c.opt.WorkerController.Close(); err != nil {
 		rerr = multierror.Append(rerr, err)
 	}
-
 	if err := c.opt.CacheStore.Close(); err != nil {
 		rerr = multierror.Append(rerr, err)
 	}
-
+	if err := c.solver.Close(); err != nil {
+		rerr = multierror.Append(rerr, err)
+	}
 	return rerr
 }
 
@@ -313,6 +314,7 @@ func translateLegacySolveRequest(req *controlapi.SolveRequest) {
 		req.Cache.ExportRefDeprecated = ""
 		req.Cache.ExportAttrsDeprecated = nil
 	}
+
 	// translates ImportRefs to new Imports (v0.4.0)
 	for _, legacyImportRef := range req.Cache.ImportRefsDeprecated {
 		im := &controlapi.CacheOptionsEntry{
@@ -323,6 +325,16 @@ func translateLegacySolveRequest(req *controlapi.SolveRequest) {
 		req.Cache.Imports = append(req.Cache.Imports, im)
 	}
 	req.Cache.ImportRefsDeprecated = nil
+
+	// translate single exporter to a slice (v0.13.0)
+	if len(req.Exporters) == 0 && req.ExporterDeprecated != "" {
+		req.Exporters = append(req.Exporters, &controlapi.Exporter{
+			Type:  req.ExporterDeprecated,
+			Attrs: req.ExporterAttrsDeprecated,
+		})
+		req.ExporterDeprecated = ""
+		req.ExporterAttrsDeprecated = nil
+	}
 }
 
 func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) {
@@ -335,7 +347,6 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 		time.AfterFunc(time.Second, c.throttledGC)
 	}()
 
-	var expi exporter.ExporterInstance
 	// TODO: multiworker
 	// This is actually tricky, as the exporter should come from the worker that has the returned reference. We may need to delay this so that the solver loads this.
 	w, err := c.opt.WorkerController.GetDefault()
@@ -343,25 +354,29 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 		return nil, err
 	}
 
-	// if SOURCE_DATE_EPOCH is set, enable it for the exporter
+	// if SOURCE_DATE_EPOCH is set, enable it for the exporters
 	if v, ok := epoch.ParseBuildArgs(req.FrontendAttrs); ok {
-		if _, ok := req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)]; !ok {
-			if req.ExporterAttrs == nil {
-				req.ExporterAttrs = make(map[string]string)
+		for _, ex := range req.Exporters {
+			if _, ok := ex.Attrs[string(exptypes.OptKeySourceDateEpoch)]; !ok {
+				if ex.Attrs == nil {
+					ex.Attrs = make(map[string]string)
+				}
+				ex.Attrs[string(exptypes.OptKeySourceDateEpoch)] = v
 			}
-			req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)] = v
 		}
 	}
 
-	if req.Exporter != "" {
-		exp, err := w.Exporter(req.Exporter, c.opt.SessionManager)
+	var expis []exporter.ExporterInstance
+	for i, ex := range req.Exporters {
+		exp, err := w.Exporter(ex.Type, c.opt.SessionManager)
 		if err != nil {
 			return nil, err
 		}
-		expi, err = exp.Resolve(ctx, req.ExporterAttrs)
+		expi, err := exp.Resolve(ctx, i, ex.Attrs)
 		if err != nil {
 			return nil, err
 		}
+		expis = append(expis, expi)
 	}
 
 	if c, err := findDuplicateCacheOptions(req.Cache.Exports); err != nil {
@@ -456,10 +471,8 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 		FrontendInputs: req.FrontendInputs,
 		CacheImports:   cacheImports,
 	}, llbsolver.ExporterRequest{
-		Exporter:       expi,
+		Exporters:      expis,
 		CacheExporters: cacheExporters,
-		Type:           req.Exporter,
-		Attrs:          req.ExporterAttrs,
 	}, req.Entitlements, procs, req.Internal, req.SourcePolicy)
 	if err != nil {
 		return nil, err
@@ -508,10 +521,10 @@ func (c *Controller) Session(stream controlapi.Control_SessionServer) error {
 	conn, closeCh, opts := grpchijack.Hijack(stream)
 	defer conn.Close()
 
-	ctx, cancel := context.WithCancel(stream.Context())
+	ctx, cancel := context.WithCancelCause(stream.Context())
 	go func() {
 		<-closeCh
-		cancel()
+		cancel(errors.WithStack(context.Canceled))
 	}()
 
 	err := c.opt.SessionManager.HandleConn(ctx, conn, opts)

+ 14 - 3
vendor/github.com/moby/buildkit/control/gateway/gateway.go

@@ -8,6 +8,7 @@ import (
 	"github.com/moby/buildkit/client/buildid"
 	"github.com/moby/buildkit/frontend/gateway"
 	gwapi "github.com/moby/buildkit/frontend/gateway/pb"
+	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/pkg/errors"
 	"google.golang.org/grpc"
 )
@@ -58,8 +59,9 @@ func (gwf *GatewayForwarder) lookupForwarder(ctx context.Context) (gateway.LLBBr
 		return nil, errors.New("no buildid found in context")
 	}
 
-	ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
-	defer cancel()
+	ctx, cancel := context.WithCancelCause(ctx)
+	ctx, _ = context.WithTimeoutCause(ctx, 3*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer cancel(errors.WithStack(context.Canceled))
 
 	go func() {
 		<-ctx.Done()
@@ -73,7 +75,7 @@ func (gwf *GatewayForwarder) lookupForwarder(ctx context.Context) (gateway.LLBBr
 	for {
 		select {
 		case <-ctx.Done():
-			return nil, errors.Errorf("no such job %s", bid)
+			return nil, errdefs.NewUnknownJobError(bid)
 		default:
 		}
 		fwd, ok := gwf.builds[bid]
@@ -94,6 +96,15 @@ func (gwf *GatewayForwarder) ResolveImageConfig(ctx context.Context, req *gwapi.
 	return fwd.ResolveImageConfig(ctx, req)
 }
 
+func (gwf *GatewayForwarder) ResolveSourceMeta(ctx context.Context, req *gwapi.ResolveSourceMetaRequest) (*gwapi.ResolveSourceMetaResponse, error) {
+	fwd, err := gwf.lookupForwarder(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "forwarding ResolveSourceMeta")
+	}
+
+	return fwd.ResolveSourceMeta(ctx, req)
+}
+
 func (gwf *GatewayForwarder) Solve(ctx context.Context, req *gwapi.SolveRequest) (*gwapi.SolveResponse, error) {
 	fwd, err := gwf.lookupForwarder(ctx)
 	if err != nil {

+ 75 - 107
vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go

@@ -16,19 +16,13 @@ import (
 	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/cio"
 	"github.com/containerd/containerd/mount"
-	containerdoci "github.com/containerd/containerd/oci"
-	"github.com/containerd/continuity/fs"
-	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor/oci"
 	resourcestypes "github.com/moby/buildkit/executor/resources/types"
 	gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
 	"github.com/moby/buildkit/identity"
-	"github.com/moby/buildkit/snapshot"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/network"
-	rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
-	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 )
 
@@ -38,12 +32,13 @@ type containerdExecutor struct {
 	networkProviders map[pb.NetMode]network.Provider
 	cgroupParent     string
 	dnsConfig        *oci.DNSConfig
-	running          map[string]chan error
+	running          map[string]*containerState
 	mu               sync.Mutex
 	apparmorProfile  string
 	selinux          bool
 	traceSocket      string
 	rootless         bool
+	runtime          *RuntimeInfo
 }
 
 // OnCreateRuntimer provides an alternative to OCI hooks for applying network
@@ -59,8 +54,14 @@ type OnCreateRuntimer interface {
 	OnCreateRuntime(pid uint32) error
 }
 
+type RuntimeInfo struct {
+	Name    string
+	Path    string
+	Options any
+}
+
 // New creates a new executor backed by connection to containerd API
-func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, selinux bool, traceSocket string, rootless bool) executor.Executor {
+func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, selinux bool, traceSocket string, rootless bool, runtime *RuntimeInfo) executor.Executor {
 	// clean up old hosts/resolv.conf file. ignore errors
 	os.RemoveAll(filepath.Join(root, "hosts"))
 	os.RemoveAll(filepath.Join(root, "resolv.conf"))
@@ -71,14 +72,25 @@ func New(client *containerd.Client, root, cgroup string, networkProviders map[pb
 		networkProviders: networkProviders,
 		cgroupParent:     cgroup,
 		dnsConfig:        dnsConfig,
-		running:          make(map[string]chan error),
+		running:          make(map[string]*containerState),
 		apparmorProfile:  apparmorProfile,
 		selinux:          selinux,
 		traceSocket:      traceSocket,
 		rootless:         rootless,
+		runtime:          runtime,
 	}
 }
 
+type containerState struct {
+	done chan error
+	// On linux the rootfsPath is used to ensure the CWD exists, to fetch user information
+	// and as a bind mount for the root FS of the container.
+	rootfsPath string
+	// On Windows we need to use the root mounts to achieve the same thing that Linux does
+	// with rootfsPath. So we save both in details.
+	rootMounts []mount.Mount
+}
+
 func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) {
 	if id == "" {
 		id = identity.NewID()
@@ -86,8 +98,11 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 
 	startedOnce := sync.Once{}
 	done := make(chan error, 1)
+	details := &containerState{
+		done: done,
+	}
 	w.mu.Lock()
-	w.running[id] = done
+	w.running[id] = details
 	w.mu.Unlock()
 	defer func() {
 		w.mu.Lock()
@@ -103,96 +118,49 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 	}()
 
 	meta := process.Meta
-
-	resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig)
-	if err != nil {
-		return nil, err
-	}
-
-	hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, meta.Hostname)
-	if err != nil {
-		return nil, err
-	}
-	if clean != nil {
-		defer clean()
+	if meta.NetMode == pb.NetMode_HOST {
+		bklog.G(ctx).Info("enabling HostNetworking")
 	}
 
-	mountable, err := root.Src.Mount(ctx, false)
-	if err != nil {
-		return nil, err
+	provider, ok := w.networkProviders[meta.NetMode]
+	if !ok {
+		return nil, errors.Errorf("unknown network mode %s", meta.NetMode)
 	}
 
-	rootMounts, release, err := mountable.Mount()
+	resolvConf, hostsFile, releasers, err := w.prepareExecutionEnv(ctx, root, mounts, meta, details, meta.NetMode)
 	if err != nil {
 		return nil, err
 	}
-	if release != nil {
-		defer release()
-	}
 
-	lm := snapshot.LocalMounterWithMounts(rootMounts)
-	rootfsPath, err := lm.Mount()
-	if err != nil {
-		return nil, err
+	if releasers != nil {
+		defer releasers()
 	}
-	defer lm.Unmount()
-	defer executor.MountStubsCleaner(ctx, rootfsPath, mounts, meta.RemoveMountStubsRecursive)()
 
-	uid, gid, sgids, err := oci.GetUser(rootfsPath, meta.User)
-	if err != nil {
+	if err := w.ensureCWD(ctx, details, meta); err != nil {
 		return nil, err
 	}
 
-	identity := idtools.Identity{
-		UID: int(uid),
-		GID: int(gid),
-	}
-
-	newp, err := fs.RootPath(rootfsPath, meta.Cwd)
-	if err != nil {
-		return nil, errors.Wrapf(err, "working dir %s points to invalid target", newp)
-	}
-	if _, err := os.Stat(newp); err != nil {
-		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
-			return nil, errors.Wrapf(err, "failed to create working directory %s", newp)
-		}
-	}
-
-	provider, ok := w.networkProviders[meta.NetMode]
-	if !ok {
-		return nil, errors.Errorf("unknown network mode %s", meta.NetMode)
-	}
 	namespace, err := provider.New(ctx, meta.Hostname)
 	if err != nil {
 		return nil, err
 	}
 	defer namespace.Close()
 
-	if meta.NetMode == pb.NetMode_HOST {
-		bklog.G(ctx).Info("enabling HostNetworking")
-	}
-
-	opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)}
-	if meta.ReadonlyRootFS {
-		opts = append(opts, containerdoci.WithRootFSReadonly())
-	}
-
-	processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
-	spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...)
+	spec, releaseSpec, err := w.createOCISpec(ctx, id, resolvConf, hostsFile, namespace, mounts, meta, details)
 	if err != nil {
 		return nil, err
 	}
-	defer cleanup()
-	spec.Process.Terminal = meta.Tty
-	if w.rootless {
-		if err := rootlessspecconv.ToRootless(spec); err != nil {
-			return nil, err
-		}
+	if releaseSpec != nil {
+		defer releaseSpec()
 	}
 
-	container, err := w.client.NewContainer(ctx, id,
+	opts := []containerd.NewContainerOpts{
 		containerd.WithSpec(spec),
-	)
+	}
+	if w.runtime != nil {
+		opts = append(opts, containerd.WithRuntime(w.runtime.Name, w.runtime.Options))
+	}
+	container, err := w.client.NewContainer(ctx, id, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -209,11 +177,14 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 		cioOpts = append(cioOpts, cio.WithTerminal)
 	}
 
-	task, err := container.NewTask(ctx, cio.NewCreator(cioOpts...), containerd.WithRootFS([]mount.Mount{{
-		Source:  rootfsPath,
-		Type:    "bind",
-		Options: []string{"rbind"},
-	}}))
+	taskOpts, err := details.getTaskOpts()
+	if err != nil {
+		return nil, err
+	}
+	if w.runtime != nil && w.runtime.Path != "" {
+		taskOpts = append(taskOpts, containerd.WithRuntimePath(w.runtime.Path))
+	}
+	task, err := container.NewTask(ctx, cio.NewCreator(cioOpts...), taskOpts...)
 	if err != nil {
 		return nil, err
 	}
@@ -249,17 +220,16 @@ func (w *containerdExecutor) Exec(ctx context.Context, id string, process execut
 	// is in the process of being created and check again every 100ms or until
 	// context is canceled.
 
+	w.mu.Lock()
+	details, ok := w.running[id]
+	w.mu.Unlock()
+
+	if !ok {
+		return errors.Errorf("container %s not found", id)
+	}
 	var container containerd.Container
 	var task containerd.Task
 	for {
-		w.mu.Lock()
-		done, ok := w.running[id]
-		w.mu.Unlock()
-
-		if !ok {
-			return errors.Errorf("container %s not found", id)
-		}
-
 		if container == nil {
 			container, _ = w.client.LoadContainer(ctx, id)
 		}
@@ -274,8 +244,8 @@ func (w *containerdExecutor) Exec(ctx context.Context, id string, process execut
 		}
 		select {
 		case <-ctx.Done():
-			return ctx.Err()
-		case err, ok := <-done:
+			return context.Cause(ctx)
+		case err, ok := <-details.done:
 			if !ok || err == nil {
 				return errors.Errorf("container %s has stopped", id)
 			}
@@ -291,23 +261,20 @@ func (w *containerdExecutor) Exec(ctx context.Context, id string, process execut
 	}
 
 	proc := spec.Process
-
-	// TODO how do we get rootfsPath for oci.GetUser in case user passed in username rather than uid:gid?
-	// For now only support uid:gid
 	if meta.User != "" {
-		uid, gid, err := oci.ParseUIDGID(meta.User)
+		userSpec, err := getUserSpec(meta.User, details.rootfsPath)
 		if err != nil {
 			return errors.WithStack(err)
 		}
-		proc.User = specs.User{
-			UID:            uid,
-			GID:            gid,
-			AdditionalGids: []uint32{},
-		}
+		proc.User = userSpec
 	}
 
 	proc.Terminal = meta.Tty
-	proc.Args = meta.Args
+	// setArgs will set the proper command line arguments for this process.
+	// On Windows, this will set the CommandLine field. On Linux it will set the
+	// Args field.
+	setArgs(proc, meta.Args)
+
 	if meta.Cwd != "" {
 		spec.Process.Cwd = meta.Cwd
 	}
@@ -370,8 +337,8 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
 
 	// handle signals (and resize) in separate go loop so it does not
 	// potentially block the container cancel/exit status loop below.
-	eventCtx, eventCancel := context.WithCancel(ctx)
-	defer eventCancel()
+	eventCtx, eventCancel := context.WithCancelCause(ctx)
+	defer eventCancel(errors.WithStack(context.Canceled))
 	go func() {
 		for {
 			select {
@@ -405,7 +372,7 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
 		}
 	}()
 
-	var cancel func()
+	var cancel func(error)
 	var killCtxDone <-chan struct{}
 	ctxDone := ctx.Done()
 	for {
@@ -413,13 +380,14 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
 		case <-ctxDone:
 			ctxDone = nil
 			var killCtx context.Context
-			killCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+			killCtx, cancel = context.WithCancelCause(context.Background())
+			killCtx, _ = context.WithTimeoutCause(killCtx, 10*time.Second, errors.WithStack(context.DeadlineExceeded))
 			killCtxDone = killCtx.Done()
 			p.Kill(killCtx, syscall.SIGKILL)
 			io.Cancel()
 		case status := <-statusCh:
 			if cancel != nil {
-				cancel()
+				cancel(errors.WithStack(context.Canceled))
 			}
 			trace.SpanFromContext(ctx).AddEvent(
 				"Container exited",
@@ -437,7 +405,7 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
 				}
 				select {
 				case <-ctx.Done():
-					exitErr.Err = errors.Wrap(ctx.Err(), exitErr.Error())
+					exitErr.Err = errors.Wrap(context.Cause(ctx), exitErr.Error())
 				default:
 				}
 				return exitErr
@@ -445,7 +413,7 @@ func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Proces
 			return nil
 		case <-killCtxDone:
 			if cancel != nil {
-				cancel()
+				cancel(errors.WithStack(context.Canceled))
 			}
 			io.Cancel()
 			return errors.Errorf("failed to kill process on cancel")

+ 183 - 0
vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_unix.go

@@ -0,0 +1,183 @@
+//go:build !windows
+// +build !windows
+
+package containerdexecutor
+
+import (
+	"context"
+	"os"
+	"runtime"
+
+	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/mount"
+	containerdoci "github.com/containerd/containerd/oci"
+	"github.com/containerd/continuity/fs"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/moby/buildkit/executor"
+	"github.com/moby/buildkit/executor/oci"
+	"github.com/moby/buildkit/snapshot"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/bklog"
+	"github.com/moby/buildkit/util/network"
+	rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+func getUserSpec(user, rootfsPath string) (specs.User, error) {
+	var err error
+	var uid, gid uint32
+	var sgids []uint32
+	if rootfsPath != "" {
+		uid, gid, sgids, err = oci.GetUser(rootfsPath, user)
+	} else {
+		uid, gid, err = oci.ParseUIDGID(user)
+	}
+	if err != nil {
+		return specs.User{}, errors.WithStack(err)
+	}
+	return specs.User{
+		UID:            uid,
+		GID:            gid,
+		AdditionalGids: sgids,
+	}, nil
+}
+
+func (w *containerdExecutor) prepareExecutionEnv(ctx context.Context, rootMount executor.Mount, mounts []executor.Mount, meta executor.Meta, details *containerState, netMode pb.NetMode) (string, string, func(), error) {
+	var releasers []func()
+	releaseAll := func() {
+		for i := len(releasers) - 1; i >= 0; i-- {
+			releasers[i]()
+		}
+	}
+
+	resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig, netMode)
+	if err != nil {
+		releaseAll()
+		return "", "", nil, err
+	}
+
+	hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, meta.Hostname)
+	if err != nil {
+		releaseAll()
+		return "", "", nil, err
+	}
+	if clean != nil {
+		releasers = append(releasers, clean)
+	}
+	mountable, err := rootMount.Src.Mount(ctx, false)
+	if err != nil {
+		releaseAll()
+		return "", "", nil, err
+	}
+
+	rootMounts, release, err := mountable.Mount()
+	if err != nil {
+		releaseAll()
+		return "", "", nil, err
+	}
+	details.rootMounts = rootMounts
+
+	if release != nil {
+		releasers = append(releasers, func() {
+			if err := release(); err != nil {
+				bklog.G(ctx).WithError(err).Error("failed to release root mount")
+			}
+		})
+	}
+	lm := snapshot.LocalMounterWithMounts(rootMounts)
+	rootfsPath, err := lm.Mount()
+	if err != nil {
+		releaseAll()
+		return "", "", nil, err
+	}
+	details.rootfsPath = rootfsPath
+	releasers = append(releasers, func() {
+		if err := lm.Unmount(); err != nil {
+			bklog.G(ctx).WithError(err).Error("failed to unmount rootfs")
+		}
+	})
+	releasers = append(releasers, executor.MountStubsCleaner(ctx, details.rootfsPath, mounts, meta.RemoveMountStubsRecursive))
+
+	return resolvConf, hostsFile, releaseAll, nil
+}
+
+func (w *containerdExecutor) ensureCWD(ctx context.Context, details *containerState, meta executor.Meta) error {
+	newp, err := fs.RootPath(details.rootfsPath, meta.Cwd)
+	if err != nil {
+		return errors.Wrapf(err, "working dir %s points to invalid target", newp)
+	}
+
+	uid, gid, _, err := oci.GetUser(details.rootfsPath, meta.User)
+	if err != nil {
+		return err
+	}
+
+	identity := idtools.Identity{
+		UID: int(uid),
+		GID: int(gid),
+	}
+
+	if _, err := os.Stat(newp); err != nil {
+		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
+			return errors.Wrapf(err, "failed to create working directory %s", newp)
+		}
+	}
+	return nil
+}
+
+func (w *containerdExecutor) createOCISpec(ctx context.Context, id, resolvConf, hostsFile string, namespace network.Namespace, mounts []executor.Mount, meta executor.Meta, details *containerState) (*specs.Spec, func(), error) {
+	var releasers []func()
+	releaseAll := func() {
+		for i := len(releasers) - 1; i >= 0; i-- {
+			releasers[i]()
+		}
+	}
+
+	uid, gid, sgids, err := oci.GetUser(details.rootfsPath, meta.User)
+	if err != nil {
+		releaseAll()
+		return nil, nil, err
+	}
+
+	opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)}
+	if meta.ReadonlyRootFS {
+		opts = append(opts, containerdoci.WithRootFSReadonly())
+	}
+
+	processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
+	spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...)
+	if err != nil {
+		releaseAll()
+		return nil, nil, err
+	}
+	releasers = append(releasers, cleanup)
+	spec.Process.Terminal = meta.Tty
+	if w.rootless {
+		if err := rootlessspecconv.ToRootless(spec); err != nil {
+			releaseAll()
+			return nil, nil, err
+		}
+	}
+	return spec, releaseAll, nil
+}
+
+func (d *containerState) getTaskOpts() ([]containerd.NewTaskOpts, error) {
+	rootfs := containerd.WithRootFS([]mount.Mount{{
+		Source:  d.rootfsPath,
+		Type:    "bind",
+		Options: []string{"rbind"},
+	}})
+	if runtime.GOOS == "freebsd" {
+		rootfs = containerd.WithRootFS([]mount.Mount{{
+			Source:  d.rootfsPath,
+			Type:    "nullfs",
+			Options: []string{},
+		}})
+	}
+	return []containerd.NewTaskOpts{rootfs}, nil
+}
+
+func setArgs(spec *specs.Process, args []string) {
+	spec.Args = args
+}

+ 106 - 0
vendor/github.com/moby/buildkit/executor/containerdexecutor/executor_windows.go

@@ -0,0 +1,106 @@
+package containerdexecutor
+
+import (
+	"context"
+	"os"
+	"strings"
+
+	"github.com/containerd/containerd"
+	containerdoci "github.com/containerd/containerd/oci"
+	"github.com/containerd/continuity/fs"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/moby/buildkit/executor"
+	"github.com/moby/buildkit/executor/oci"
+	"github.com/moby/buildkit/snapshot"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/network"
+	"github.com/moby/buildkit/util/windows"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+func getUserSpec(user, rootfsPath string) (specs.User, error) {
+	return specs.User{
+		Username: user,
+	}, nil
+}
+
+func (w *containerdExecutor) prepareExecutionEnv(ctx context.Context, rootMount executor.Mount, mounts []executor.Mount, meta executor.Meta, details *containerState, netMode pb.NetMode) (string, string, func(), error) {
+	var releasers []func() error
+	releaseAll := func() {
+		for _, release := range releasers {
+			release()
+		}
+	}
+
+	mountable, err := rootMount.Src.Mount(ctx, false)
+	if err != nil {
+		return "", "", releaseAll, err
+	}
+
+	rootMounts, release, err := mountable.Mount()
+	if err != nil {
+		return "", "", releaseAll, err
+	}
+	details.rootMounts = rootMounts
+	releasers = append(releasers, release)
+
+	return "", "", releaseAll, nil
+}
+
+func (w *containerdExecutor) ensureCWD(ctx context.Context, details *containerState, meta executor.Meta) (err error) {
+	// TODO(gabriel-samfira): Use a snapshot?
+	identity, err := windows.ResolveUsernameToSID(ctx, w, details.rootMounts, meta.User)
+	if err != nil {
+		return errors.Wrap(err, "getting user SID")
+	}
+
+	lm := snapshot.LocalMounterWithMounts(details.rootMounts)
+	rootfsPath, err := lm.Mount()
+	if err != nil {
+		return err
+	}
+	defer lm.Unmount()
+
+	newp, err := fs.RootPath(rootfsPath, meta.Cwd)
+	if err != nil {
+		return errors.Wrapf(err, "working dir %s points to invalid target", newp)
+	}
+
+	if _, err := os.Stat(newp); err != nil {
+		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
+			return errors.Wrapf(err, "failed to create working directory %s", newp)
+		}
+	}
+	return nil
+}
+
+func (w *containerdExecutor) createOCISpec(ctx context.Context, id, resolvConf, hostsFile string, namespace network.Namespace, mounts []executor.Mount, meta executor.Meta, details *containerState) (*specs.Spec, func(), error) {
+	var releasers []func()
+	releaseAll := func() {
+		for _, release := range releasers {
+			release()
+		}
+	}
+
+	opts := []containerdoci.SpecOpts{
+		containerdoci.WithUser(meta.User),
+	}
+
+	processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
+	spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, "", "", namespace, "", processMode, nil, "", false, w.traceSocket, opts...)
+	if err != nil {
+		releaseAll()
+		return nil, nil, err
+	}
+	releasers = append(releasers, cleanup)
+	return spec, releaseAll, nil
+}
+
+func (d *containerState) getTaskOpts() ([]containerd.NewTaskOpts, error) {
+	return []containerd.NewTaskOpts{containerd.WithRootFS(d.rootMounts)}, nil
+}
+
+func setArgs(spec *specs.Process, args []string) {
+	spec.CommandLine = strings.Join(args, " ")
+}

+ 0 - 67
vendor/github.com/moby/buildkit/executor/oci/mounts.go

@@ -24,30 +24,6 @@ func withRemovedMount(destination string) oci.SpecOpts {
 	}
 }
 
-func withROBind(src, dest string) oci.SpecOpts {
-	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
-		s.Mounts = append(s.Mounts, specs.Mount{
-			Destination: dest,
-			Type:        "bind",
-			Source:      src,
-			Options:     []string{"nosuid", "noexec", "nodev", "rbind", "ro"},
-		})
-		return nil
-	}
-}
-
-func withCGroup() oci.SpecOpts {
-	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
-		s.Mounts = append(s.Mounts, specs.Mount{
-			Destination: "/sys/fs/cgroup",
-			Type:        "cgroup",
-			Source:      "cgroup",
-			Options:     []string{"ro", "nosuid", "noexec", "nodev"},
-		})
-		return nil
-	}
-}
-
 func hasPrefix(p, prefixDir string) bool {
 	prefixDir = filepath.Clean(prefixDir)
 	if filepath.Base(prefixDir) == string(filepath.Separator) {
@@ -57,49 +33,6 @@ func hasPrefix(p, prefixDir string) bool {
 	return p == prefixDir || strings.HasPrefix(p, prefixDir+string(filepath.Separator))
 }
 
-func removeMountsWithPrefix(mounts []specs.Mount, prefixDir string) []specs.Mount {
-	var ret []specs.Mount
-	for _, m := range mounts {
-		if !hasPrefix(m.Destination, prefixDir) {
-			ret = append(ret, m)
-		}
-	}
-	return ret
-}
-
-func withBoundProc() oci.SpecOpts {
-	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
-		s.Mounts = removeMountsWithPrefix(s.Mounts, "/proc")
-		procMount := specs.Mount{
-			Destination: "/proc",
-			Type:        "bind",
-			Source:      "/proc",
-			// NOTE: "rbind"+"ro" does not make /proc read-only recursively.
-			// So we keep maskedPath and readonlyPaths (although not mandatory for rootless mode)
-			Options: []string{"rbind"},
-		}
-		s.Mounts = append([]specs.Mount{procMount}, s.Mounts...)
-
-		var maskedPaths []string
-		for _, s := range s.Linux.MaskedPaths {
-			if !hasPrefix(s, "/proc") {
-				maskedPaths = append(maskedPaths, s)
-			}
-		}
-		s.Linux.MaskedPaths = maskedPaths
-
-		var readonlyPaths []string
-		for _, s := range s.Linux.ReadonlyPaths {
-			if !hasPrefix(s, "/proc") {
-				readonlyPaths = append(readonlyPaths, s)
-			}
-		}
-		s.Linux.ReadonlyPaths = readonlyPaths
-
-		return nil
-	}
-}
-
 func dedupMounts(mnts []specs.Mount) []specs.Mount {
 	ret := make([]specs.Mount, 0, len(mnts))
 	visited := make(map[string]int)

+ 15 - 8
vendor/github.com/moby/buildkit/executor/oci/resolvconf.go

@@ -7,6 +7,7 @@ import (
 
 	"github.com/docker/docker/libnetwork/resolvconf"
 	"github.com/docker/docker/pkg/idtools"
+	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/pkg/errors"
 )
@@ -24,9 +25,13 @@ type DNSConfig struct {
 	SearchDomains []string
 }
 
-func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.IdentityMapping, dns *DNSConfig) (string, error) {
+func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.IdentityMapping, dns *DNSConfig, netMode pb.NetMode) (string, error) {
 	p := filepath.Join(stateDir, "resolv.conf")
-	_, err := g.Do(ctx, stateDir, func(ctx context.Context) (struct{}, error) {
+	if netMode == pb.NetMode_HOST {
+		p = filepath.Join(stateDir, "resolv-host.conf")
+	}
+
+	_, err := g.Do(ctx, p, func(ctx context.Context) (struct{}, error) {
 		generate := !notFirstRun
 		notFirstRun = true
 
@@ -65,7 +70,6 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
 			return struct{}{}, err
 		}
 
-		var f *resolvconf.File
 		tmpPath := p + ".tmp"
 		if dns != nil {
 			var (
@@ -83,19 +87,22 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
 				dnsOptions = resolvconf.GetOptions(dt)
 			}
 
-			f, err = resolvconf.Build(tmpPath, dnsNameservers, dnsSearchDomains, dnsOptions)
+			f, err := resolvconf.Build(tmpPath, dnsNameservers, dnsSearchDomains, dnsOptions)
 			if err != nil {
 				return struct{}{}, err
 			}
 			dt = f.Content
 		}
 
-		f, err = resolvconf.FilterResolvDNS(dt, true)
-		if err != nil {
-			return struct{}{}, err
+		if netMode != pb.NetMode_HOST || len(resolvconf.GetNameservers(dt, resolvconf.IP)) == 0 {
+			f, err := resolvconf.FilterResolvDNS(dt, true)
+			if err != nil {
+				return struct{}{}, err
+			}
+			dt = f.Content
 		}
 
-		if err := os.WriteFile(tmpPath, f.Content, 0644); err != nil {
+		if err := os.WriteFile(tmpPath, dt, 0644); err != nil {
 			return struct{}{}, err
 		}
 

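A self-contained sketch of the path selection introduced above, assuming only that host-network builds should not clobber the sandboxed resolv.conf: the file name (and the flightcontrol key, which is now the path) changes with the network mode.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// netMode is a stand-in for buildkit's pb.NetMode in this sketch.
type netMode int

const (
	netModeSandbox netMode = iota
	netModeHost
)

// resolvConfPath mirrors the selection in GetResolvConf above: host-network
// builds get their own file, and the flightcontrol key follows the path so the
// two variants are generated independently.
func resolvConfPath(stateDir string, mode netMode) string {
	if mode == netModeHost {
		return filepath.Join(stateDir, "resolv-host.conf")
	}
	return filepath.Join(stateDir, "resolv.conf")
}

func main() {
	fmt.Println(resolvConfPath("/var/lib/buildkit", netModeHost))    // /var/lib/buildkit/resolv-host.conf
	fmt.Println(resolvConfPath("/var/lib/buildkit", netModeSandbox)) // /var/lib/buildkit/resolv.conf
}
```
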
+ 18 - 20
vendor/github.com/moby/buildkit/executor/oci/spec.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"path"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"sync"
 
@@ -124,7 +125,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
 	}
 
 	opts = append(opts,
-		oci.WithProcessArgs(meta.Args...),
+		withProcessArgs(meta.Args...),
 		oci.WithEnv(meta.Env),
 		oci.WithProcessCwd(meta.Cwd),
 		oci.WithNewPrivileges,
@@ -196,7 +197,9 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
 	}
 
 	if tracingSocket != "" {
-		s.Mounts = append(s.Mounts, getTracingSocketMount(tracingSocket))
+		if mount := getTracingSocketMount(tracingSocket); mount != nil {
+			s.Mounts = append(s.Mounts, *mount)
+		}
 	}
 
 	s.Mounts = dedupMounts(s.Mounts)
@@ -254,17 +257,24 @@ func (s *submounts) subMount(m mount.Mount, subPath string) (mount.Mount, error)
 		return mount.Mount{}, err
 	}
 
-	opts := []string{"rbind"}
-	for _, opt := range m.Options {
-		if opt == "ro" {
-			opts = append(opts, opt)
-		}
+	var mntType string
+	opts := []string{}
+	if m.ReadOnly() {
+		opts = append(opts, "ro")
+	}
+
+	if runtime.GOOS != "windows" {
+		// Windows uses a mechanism similar to bind mounts, but will err out if we request
+		// a mount type it does not understand. Leaving the mount type empty on Windows will
+		// yield the same result.
+		mntType = "bind"
+		opts = append(opts, "rbind")
 	}
 
 	s.m[h] = mountRef{
 		mount: mount.Mount{
 			Source:  mp,
-			Type:    "bind",
+			Type:    mntType,
 			Options: opts,
 		},
 		unmount: lm.Unmount,
@@ -298,15 +308,3 @@ func (s *submounts) cleanup() {
 	}
 	wg.Wait()
 }
-
-func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping {
-	var ids []specs.LinuxIDMapping
-	for _, item := range s {
-		ids = append(ids, specs.LinuxIDMapping{
-			HostID:      uint32(item.HostID),
-			ContainerID: uint32(item.ContainerID),
-			Size:        uint32(item.Size),
-		})
-	}
-	return ids
-}

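A hedged, self-contained sketch of the per-OS branch added to `subMount`: Linux keeps an explicit `bind` type with `rbind`, while Windows leaves the mount type empty and drops `rbind`, since requesting a mount type the platform does not understand fails there. The struct below is a local stand-in for containerd's `mount.Mount`.

```go
package main

import (
	"fmt"
	"runtime"
)

// bindMount is a stand-in for containerd's mount.Mount in this sketch.
type bindMount struct {
	Type    string
	Options []string
}

// subMountOpts mirrors the per-OS branch in subMount above.
func subMountOpts(goos string, readOnly bool) bindMount {
	m := bindMount{Options: []string{}}
	if readOnly {
		m.Options = append(m.Options, "ro")
	}
	if goos != "windows" {
		m.Type = "bind"
		m.Options = append(m.Options, "rbind")
	}
	return m
}

func main() {
	fmt.Printf("%+v\n", subMountOpts("linux", true))       // {Type:bind Options:[ro rbind]}
	fmt.Printf("%+v\n", subMountOpts("windows", true))     // {Type: Options:[ro]}
	fmt.Printf("%+v\n", subMountOpts(runtime.GOOS, false)) // host-dependent
}
```
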
+ 57 - 0
vendor/github.com/moby/buildkit/executor/oci/spec_freebsd.go

@@ -2,9 +2,66 @@ package oci
 
 import (
 	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/oci"
 	"github.com/containerd/continuity/fs"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/moby/buildkit/solver/pb"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
 )
 
+func withProcessArgs(args ...string) oci.SpecOpts {
+	return oci.WithProcessArgs(args...)
+}
+
+func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
+	return nil, nil
+}
+
+// generateSecurityOpts may affect mounts, so must be called after generateMountOpts
+func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string, selinuxB bool) ([]oci.SpecOpts, error) {
+	if mode == pb.SecurityMode_INSECURE {
+		return nil, errors.New("no support for running in insecure mode on FreeBSD")
+	}
+	return nil, nil
+}
+
+// generateProcessModeOpts may affect mounts, so must be called after generateMountOpts
+func generateProcessModeOpts(mode ProcessMode) ([]oci.SpecOpts, error) {
+	if mode == NoProcessSandbox {
+		return nil, errors.New("no support for NoProcessSandbox on FreeBSD")
+	}
+	return nil, nil
+}
+
+func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) {
+	if idmap == nil {
+		return nil, nil
+	}
+	return nil, errors.New("no support for IdentityMapping on FreeBSD")
+}
+
+func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) {
+	if len(ulimits) == 0 {
+		return nil, nil
+	}
+	return nil, errors.New("no support for POSIXRlimit on FreeBSD")
+}
+
+// tracing is not implemented on FreeBSD
+func getTracingSocketMount(socket string) *specs.Mount {
+	return nil
+}
+
+// tracing is not implemented on FreeBSD
+func getTracingSocket() string {
+	return ""
+}
+
+func cgroupV2NamespaceSupported() bool {
+	return false
+}
+
 func sub(m mount.Mount, subPath string) (mount.Mount, func() error, error) {
 	src, err := fs.RootPath(m.Source, subPath)
 	if err != nil {

+ 238 - 3
vendor/github.com/moby/buildkit/executor/oci/spec_linux.go

@@ -1,19 +1,254 @@
-//go:build linux
-// +build linux
-
 package oci
 
 import (
+	"context"
+	"fmt"
 	"os"
 	"strconv"
+	"strings"
+	"sync"
 
+	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/oci"
+	cdseccomp "github.com/containerd/containerd/pkg/seccomp"
 	"github.com/containerd/continuity/fs"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/profiles/seccomp"
 	"github.com/moby/buildkit/snapshot"
+	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/entitlements/security"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	selinux "github.com/opencontainers/selinux/go-selinux"
+	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 )
 
+var (
+	cgroupNSOnce     sync.Once
+	supportsCgroupNS bool
+)
+
+const (
+	tracingSocketPath = "/dev/otel-grpc.sock"
+)
+
+func withProcessArgs(args ...string) oci.SpecOpts {
+	return oci.WithProcessArgs(args...)
+}
+
+func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
+	return []oci.SpecOpts{
+		// https://github.com/moby/buildkit/issues/429
+		withRemovedMount("/run"),
+		withROBind(resolvConf, "/etc/resolv.conf"),
+		withROBind(hostsFile, "/etc/hosts"),
+		withCGroup(),
+	}, nil
+}
+
+// generateSecurityOpts may affect mounts, so must be called after generateMountOpts
+func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string, selinuxB bool) (opts []oci.SpecOpts, _ error) {
+	if selinuxB && !selinux.GetEnabled() {
+		return nil, errors.New("selinux is not available")
+	}
+	switch mode {
+	case pb.SecurityMode_INSECURE:
+		return []oci.SpecOpts{
+			security.WithInsecureSpec(),
+			oci.WithWriteableCgroupfs,
+			oci.WithWriteableSysfs,
+			func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
+				var err error
+				if selinuxB {
+					s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels([]string{"disable"})
+				}
+				return err
+			},
+		}, nil
+	case pb.SecurityMode_SANDBOX:
+		if cdseccomp.IsEnabled() {
+			opts = append(opts, withDefaultProfile())
+		}
+		if apparmorProfile != "" {
+			opts = append(opts, oci.WithApparmorProfile(apparmorProfile))
+		}
+		opts = append(opts, func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
+			var err error
+			if selinuxB {
+				s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels(nil)
+			}
+			return err
+		})
+		return opts, nil
+	}
+	return nil, nil
+}
+
+// generateProcessModeOpts may affect mounts, so must be called after generateMountOpts
+func generateProcessModeOpts(mode ProcessMode) ([]oci.SpecOpts, error) {
+	if mode == NoProcessSandbox {
+		return []oci.SpecOpts{
+			oci.WithHostNamespace(specs.PIDNamespace),
+			withBoundProc(),
+		}, nil
+		// TODO(AkihiroSuda): Configure seccomp to disable ptrace (and prctl?) explicitly
+	}
+	return nil, nil
+}
+
+func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) {
+	if idmap == nil {
+		return nil, nil
+	}
+	return []oci.SpecOpts{
+		oci.WithUserNamespace(specMapping(idmap.UIDMaps), specMapping(idmap.GIDMaps)),
+	}, nil
+}
+
+func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping {
+	var ids []specs.LinuxIDMapping
+	for _, item := range s {
+		ids = append(ids, specs.LinuxIDMapping{
+			HostID:      uint32(item.HostID),
+			ContainerID: uint32(item.ContainerID),
+			Size:        uint32(item.Size),
+		})
+	}
+	return ids
+}
+
+func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) {
+	if len(ulimits) == 0 {
+		return nil, nil
+	}
+	var rlimits []specs.POSIXRlimit
+	for _, u := range ulimits {
+		if u == nil {
+			continue
+		}
+		rlimits = append(rlimits, specs.POSIXRlimit{
+			Type: fmt.Sprintf("RLIMIT_%s", strings.ToUpper(u.Name)),
+			Hard: uint64(u.Hard),
+			Soft: uint64(u.Soft),
+		})
+	}
+	return []oci.SpecOpts{
+		func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
+			s.Process.Rlimits = rlimits
+			return nil
+		},
+	}, nil
+}
+
+// withDefaultProfile sets the default seccomp profile to the spec.
+// Note: must follow the setting of process capabilities
+func withDefaultProfile() oci.SpecOpts {
+	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
+		var err error
+		s.Linux.Seccomp, err = seccomp.GetDefaultProfile(s)
+		return err
+	}
+}
+
+func withROBind(src, dest string) oci.SpecOpts {
+	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
+		s.Mounts = append(s.Mounts, specs.Mount{
+			Destination: dest,
+			Type:        "bind",
+			Source:      src,
+			Options:     []string{"nosuid", "noexec", "nodev", "rbind", "ro"},
+		})
+		return nil
+	}
+}
+
+func withCGroup() oci.SpecOpts {
+	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
+		s.Mounts = append(s.Mounts, specs.Mount{
+			Destination: "/sys/fs/cgroup",
+			Type:        "cgroup",
+			Source:      "cgroup",
+			Options:     []string{"ro", "nosuid", "noexec", "nodev"},
+		})
+		return nil
+	}
+}
+
+func withBoundProc() oci.SpecOpts {
+	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
+		s.Mounts = removeMountsWithPrefix(s.Mounts, "/proc")
+		procMount := specs.Mount{
+			Destination: "/proc",
+			Type:        "bind",
+			Source:      "/proc",
+			// NOTE: "rbind"+"ro" does not make /proc read-only recursively.
+			// So we keep maskedPaths and readonlyPaths (although not mandatory for rootless mode)
+			Options: []string{"rbind"},
+		}
+		s.Mounts = append([]specs.Mount{procMount}, s.Mounts...)
+
+		var maskedPaths []string
+		for _, s := range s.Linux.MaskedPaths {
+			if !hasPrefix(s, "/proc") {
+				maskedPaths = append(maskedPaths, s)
+			}
+		}
+		s.Linux.MaskedPaths = maskedPaths
+
+		var readonlyPaths []string
+		for _, s := range s.Linux.ReadonlyPaths {
+			if !hasPrefix(s, "/proc") {
+				readonlyPaths = append(readonlyPaths, s)
+			}
+		}
+		s.Linux.ReadonlyPaths = readonlyPaths
+
+		return nil
+	}
+}
+
+func removeMountsWithPrefix(mounts []specs.Mount, prefixDir string) []specs.Mount {
+	var ret []specs.Mount
+	for _, m := range mounts {
+		if !hasPrefix(m.Destination, prefixDir) {
+			ret = append(ret, m)
+		}
+	}
+	return ret
+}
+
+func getTracingSocketMount(socket string) *specs.Mount {
+	return &specs.Mount{
+		Destination: tracingSocketPath,
+		Type:        "bind",
+		Source:      socket,
+		Options:     []string{"ro", "rbind"},
+	}
+}
+
+func getTracingSocket() string {
+	return fmt.Sprintf("unix://%s", tracingSocketPath)
+}
+
+func cgroupV2NamespaceSupported() bool {
+	// Check if cgroups v2 namespaces are supported.  Trying to do cgroup
+	// namespaces with cgroups v1 results in EINVAL when we encounter a
+	// non-standard hierarchy.
+	// See https://github.com/moby/buildkit/issues/4108
+	cgroupNSOnce.Do(func() {
+		if _, err := os.Stat("/proc/self/ns/cgroup"); os.IsNotExist(err) {
+			return
+		}
+		if _, err := os.Stat("/sys/fs/cgroup/cgroup.subtree_control"); os.IsNotExist(err) {
+			return
+		}
+		supportsCgroupNS = true
+	})
+	return supportsCgroupNS
+}
+
 func sub(m mount.Mount, subPath string) (mount.Mount, func() error, error) {
 	var retries = 10
 	root := m.Source

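For illustration, a small sketch of the ulimit-to-rlimit mapping performed by `generateRlimitOpts` above: the name is upper-cased and prefixed, so a `nofile` ulimit becomes `RLIMIT_NOFILE` in the spec.

```go
package main

import (
	"fmt"
	"strings"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// toRlimit mirrors the conversion in generateRlimitOpts above:
// "nofile" -> "RLIMIT_NOFILE" with the given hard/soft limits.
func toRlimit(name string, hard, soft uint64) specs.POSIXRlimit {
	return specs.POSIXRlimit{
		Type: fmt.Sprintf("RLIMIT_%s", strings.ToUpper(name)),
		Hard: hard,
		Soft: soft,
	}
}

func main() {
	fmt.Printf("%+v\n", toRlimit("nofile", 1048576, 1024))
	// {Type:RLIMIT_NOFILE Hard:1048576 Soft:1024}
}
```
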
+ 0 - 165
vendor/github.com/moby/buildkit/executor/oci/spec_unix.go

@@ -1,165 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package oci
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"strings"
-	"sync"
-
-	"github.com/containerd/containerd/containers"
-	"github.com/containerd/containerd/oci"
-	cdseccomp "github.com/containerd/containerd/pkg/seccomp"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/profiles/seccomp"
-	"github.com/moby/buildkit/solver/pb"
-	"github.com/moby/buildkit/util/entitlements/security"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-	selinux "github.com/opencontainers/selinux/go-selinux"
-	"github.com/opencontainers/selinux/go-selinux/label"
-	"github.com/pkg/errors"
-)
-
-var (
-	cgroupNSOnce     sync.Once
-	supportsCgroupNS bool
-)
-
-const (
-	tracingSocketPath = "/dev/otel-grpc.sock"
-)
-
-func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
-	return []oci.SpecOpts{
-		// https://github.com/moby/buildkit/issues/429
-		withRemovedMount("/run"),
-		withROBind(resolvConf, "/etc/resolv.conf"),
-		withROBind(hostsFile, "/etc/hosts"),
-		withCGroup(),
-	}, nil
-}
-
-// generateSecurityOpts may affect mounts, so must be called after generateMountOpts
-func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string, selinuxB bool) (opts []oci.SpecOpts, _ error) {
-	if selinuxB && !selinux.GetEnabled() {
-		return nil, errors.New("selinux is not available")
-	}
-	switch mode {
-	case pb.SecurityMode_INSECURE:
-		return []oci.SpecOpts{
-			security.WithInsecureSpec(),
-			oci.WithWriteableCgroupfs,
-			oci.WithWriteableSysfs,
-			func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
-				var err error
-				if selinuxB {
-					s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels([]string{"disable"})
-				}
-				return err
-			},
-		}, nil
-	case pb.SecurityMode_SANDBOX:
-		if cdseccomp.IsEnabled() {
-			opts = append(opts, withDefaultProfile())
-		}
-		if apparmorProfile != "" {
-			opts = append(opts, oci.WithApparmorProfile(apparmorProfile))
-		}
-		opts = append(opts, func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
-			var err error
-			if selinuxB {
-				s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels(nil)
-			}
-			return err
-		})
-		return opts, nil
-	}
-	return nil, nil
-}
-
-// generateProcessModeOpts may affect mounts, so must be called after generateMountOpts
-func generateProcessModeOpts(mode ProcessMode) ([]oci.SpecOpts, error) {
-	if mode == NoProcessSandbox {
-		return []oci.SpecOpts{
-			oci.WithHostNamespace(specs.PIDNamespace),
-			withBoundProc(),
-		}, nil
-		// TODO(AkihiroSuda): Configure seccomp to disable ptrace (and prctl?) explicitly
-	}
-	return nil, nil
-}
-
-func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) {
-	if idmap == nil {
-		return nil, nil
-	}
-	return []oci.SpecOpts{
-		oci.WithUserNamespace(specMapping(idmap.UIDMaps), specMapping(idmap.GIDMaps)),
-	}, nil
-}
-
-func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) {
-	if len(ulimits) == 0 {
-		return nil, nil
-	}
-	var rlimits []specs.POSIXRlimit
-	for _, u := range ulimits {
-		if u == nil {
-			continue
-		}
-		rlimits = append(rlimits, specs.POSIXRlimit{
-			Type: fmt.Sprintf("RLIMIT_%s", strings.ToUpper(u.Name)),
-			Hard: uint64(u.Hard),
-			Soft: uint64(u.Soft),
-		})
-	}
-	return []oci.SpecOpts{
-		func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
-			s.Process.Rlimits = rlimits
-			return nil
-		},
-	}, nil
-}
-
-// withDefaultProfile sets the default seccomp profile to the spec.
-// Note: must follow the setting of process capabilities
-func withDefaultProfile() oci.SpecOpts {
-	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
-		var err error
-		s.Linux.Seccomp, err = seccomp.GetDefaultProfile(s)
-		return err
-	}
-}
-
-func getTracingSocketMount(socket string) specs.Mount {
-	return specs.Mount{
-		Destination: tracingSocketPath,
-		Type:        "bind",
-		Source:      socket,
-		Options:     []string{"ro", "rbind"},
-	}
-}
-
-func getTracingSocket() string {
-	return fmt.Sprintf("unix://%s", tracingSocketPath)
-}
-
-func cgroupV2NamespaceSupported() bool {
-	// Check if cgroups v2 namespaces are supported.  Trying to do cgroup
-	// namespaces with cgroups v1 results in EINVAL when we encounter a
-	// non-standard hierarchy.
-	// See https://github.com/moby/buildkit/issues/4108
-	cgroupNSOnce.Do(func() {
-		if _, err := os.Stat("/proc/self/ns/cgroup"); os.IsNotExist(err) {
-			return
-		}
-		if _, err := os.Stat("/sys/fs/cgroup/cgroup.subtree_control"); os.IsNotExist(err) {
-			return
-		}
-		supportsCgroupNS = true
-	})
-	return supportsCgroupNS
-}

+ 36 - 3
vendor/github.com/moby/buildkit/executor/oci/spec_windows.go

@@ -4,9 +4,13 @@
 package oci
 
 import (
+	"context"
 	"fmt"
+	"os"
 	"path/filepath"
+	"strings"
 
+	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/mount"
 	"github.com/containerd/containerd/oci"
 	"github.com/containerd/continuity/fs"
@@ -20,8 +24,37 @@ const (
 	tracingSocketPath = "//./pipe/otel-grpc"
 )
 
+func withProcessArgs(args ...string) oci.SpecOpts {
+	cmdLine := strings.Join(args, " ")
+	// This will set Args to nil and properly set the CommandLine option
+	// in the spec. On Windows we need to use CommandLine instead of Args.
+	return oci.WithProcessCommandLine(cmdLine)
+}
+
+func withGetUserInfoMount() oci.SpecOpts {
+	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
+		execPath, err := os.Executable()
+		if err != nil {
+			return errors.Wrap(err, "getting executable path")
+		}
+		// The buildkit binary registers a re-exec function that is invoked when called with
+		// get-user-info as the name. We mount the binary as read-only inside the container. This
+		// spares us from having to ship a separate binary just for this purpose. The container does
+		// not share any state with the running buildkit daemon. In this scenario, we use the re-exec
+		// functionality to simulate a multi-call binary.
+		s.Mounts = append(s.Mounts, specs.Mount{
+			Destination: "C:\\Windows\\System32\\get-user-info.exe",
+			Source:      execPath,
+			Options:     []string{"ro"},
+		})
+		return nil
+	}
+}
+
 func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
-	return nil, nil
+	return []oci.SpecOpts{
+		withGetUserInfoMount(),
+	}, nil
 }
 
 // generateSecurityOpts may affect mounts, so must be called after generateMountOpts
@@ -54,8 +87,8 @@ func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) {
 	return nil, errors.New("no support for POSIXRlimit on Windows")
 }
 
-func getTracingSocketMount(socket string) specs.Mount {
-	return specs.Mount{
+func getTracingSocketMount(socket string) *specs.Mount {
+	return &specs.Mount{
 		Destination: filepath.FromSlash(tracingSocketPath),
 		Source:      socket,
 		Options:     []string{"ro"},

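A hedged sketch of the multi-call-binary trick referenced in the `withGetUserInfoMount` comment, using moby's `pkg/reexec`; the exact handler buildkitd registers is an assumption here, the diff only states that one exists for the `get-user-info` name.

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Hypothetical handler matching the mount destination above; in the real
	// daemon this would resolve the requested username to a SID.
	reexec.Register("get-user-info", func() {
		fmt.Println("running as get-user-info")
		os.Exit(0)
	})
}

func main() {
	// Init returns true when argv[0] matched a registered name, i.e. when the
	// binary was re-invoked as the helper rather than as the daemon.
	if reexec.Init() {
		return
	}
	fmt.Println("running as the main daemon")
}
```
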
+ 1 - 1
vendor/github.com/moby/buildkit/executor/oci/user.go

@@ -9,7 +9,7 @@ import (
 	"github.com/containerd/containerd/containers"
 	containerdoci "github.com/containerd/containerd/oci"
 	"github.com/containerd/continuity/fs"
-	"github.com/opencontainers/runc/libcontainer/user"
+	"github.com/moby/sys/user"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 )

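The import swap above implies that `github.com/moby/sys/user` keeps the former `runc/libcontainer/user` API, so callers only change the import path. A short sketch under that assumption:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/moby/sys/user"
)

func main() {
	// Parse a single /etc/passwd-style line and keep only the root entry.
	passwd := strings.NewReader("root:x:0:0:root:/root:/bin/sh\n")
	users, err := user.ParsePasswdFilter(passwd, func(u user.User) bool {
		return u.Name == "root"
	})
	if err != nil || len(users) == 0 {
		panic("no matching user")
	}
	fmt.Printf("uid=%d home=%s\n", users[0].Uid, users[0].Home) // uid=0 home=/root
}
```
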
+ 3 - 3
vendor/github.com/moby/buildkit/executor/resources/monitor.go

@@ -11,9 +11,9 @@ import (
 	"time"
 
 	"github.com/moby/buildkit/executor/resources/types"
+	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/network"
 	"github.com/prometheus/procfs"
-	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -229,7 +229,7 @@ func NewMonitor() (*Monitor, error) {
 			return
 		}
 		if err := prepareCgroupControllers(); err != nil {
-			logrus.Warnf("failed to prepare cgroup controllers: %+v", err)
+			bklog.L.Warnf("failed to prepare cgroup controllers: %+v", err)
 		}
 	})
 
@@ -280,7 +280,7 @@ func prepareCgroupControllers() error {
 		}
 		if err := os.WriteFile(filepath.Join(defaultMountpoint, cgroupSubtreeFile), []byte("+"+c), 0); err != nil {
 			// ignore error
-			logrus.Warnf("failed to enable cgroup controller %q: %+v", c, err)
+			bklog.L.Warnf("failed to enable cgroup controller %q: %+v", c, err)
 		}
 	}
 	return nil

+ 23 - 22
vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go

@@ -146,8 +146,6 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex
 }
 
 func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) {
-	meta := process.Meta
-
 	startedOnce := sync.Once{}
 	done := make(chan error, 1)
 	w.mu.Lock()
@@ -166,6 +164,11 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 		}
 	}()
 
+	meta := process.Meta
+	if meta.NetMode == pb.NetMode_HOST {
+		bklog.G(ctx).Info("enabling HostNetworking")
+	}
+
 	provider, ok := w.networkProviders[meta.NetMode]
 	if !ok {
 		return nil, errors.Errorf("unknown network mode %s", meta.NetMode)
@@ -181,11 +184,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 		}
 	}()
 
-	if meta.NetMode == pb.NetMode_HOST {
-		bklog.G(ctx).Info("enabling HostNetworking")
-	}
-
-	resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns)
+	resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns, meta.NetMode)
 	if err != nil {
 		return nil, err
 	}
@@ -369,7 +368,7 @@ func exitError(ctx context.Context, err error) error {
 		)
 		select {
 		case <-ctx.Done():
-			exitErr.Err = errors.Wrapf(ctx.Err(), exitErr.Error())
+			exitErr.Err = errors.Wrapf(context.Cause(ctx), exitErr.Error())
 			return exitErr
 		default:
 			return stack.Enable(exitErr)
@@ -402,7 +401,7 @@ func (w *runcExecutor) Exec(ctx context.Context, id string, process executor.Pro
 		}
 		select {
 		case <-ctx.Done():
-			return ctx.Err()
+			return context.Cause(ctx)
 		case err, ok := <-done:
 			if !ok || err == nil {
 				return errors.Errorf("container %s has stopped", id)
@@ -532,8 +531,9 @@ func (k procKiller) Kill(ctx context.Context) (err error) {
 
 	// this timeout is generally a no-op, the Kill ctx should already have a
 	// shorter timeout, but is here as a fail-safe for future refactoring.
-	ctx, timeout := context.WithTimeout(ctx, 10*time.Second)
-	defer timeout()
+	ctx, cancel := context.WithCancelCause(ctx)
+	ctx, _ = context.WithTimeoutCause(ctx, 10*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer cancel(errors.WithStack(context.Canceled))
 
 	if k.pidfile == "" {
 		// for `runc run` process we use `runc kill` to terminate the process
@@ -580,7 +580,7 @@ type procHandle struct {
 	monitorProcess *os.Process
 	ready          chan struct{}
 	ended          chan struct{}
-	shutdown       func()
+	shutdown       func(error)
 	// this is only used when the request context is canceled and we need
 	// to kill the in-container process.
 	killer procKiller
@@ -594,7 +594,7 @@ type procHandle struct {
 // The goal is to allow for runc to gracefully shutdown when the request context
 // is cancelled.
 func runcProcessHandle(ctx context.Context, killer procKiller) (*procHandle, context.Context) {
-	runcCtx, cancel := context.WithCancel(context.Background())
+	runcCtx, cancel := context.WithCancelCause(context.Background())
 	p := &procHandle{
 		ready:    make(chan struct{}),
 		ended:    make(chan struct{}),
@@ -615,17 +615,17 @@ func runcProcessHandle(ctx context.Context, killer procKiller) (*procHandle, con
 		for {
 			select {
 			case <-ctx.Done():
-				killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second)
+				killCtx, timeout := context.WithCancelCause(context.Background())
+				killCtx, _ = context.WithTimeoutCause(killCtx, 7*time.Second, errors.WithStack(context.DeadlineExceeded))
 				if err := p.killer.Kill(killCtx); err != nil {
 					select {
 					case <-killCtx.Done():
-						timeout()
-						cancel()
+						cancel(errors.WithStack(context.Cause(ctx)))
 						return
 					default:
 					}
 				}
-				timeout()
+				timeout(errors.WithStack(context.Canceled))
 				select {
 				case <-time.After(50 * time.Millisecond):
 				case <-p.ended:
@@ -653,7 +653,7 @@ func (p *procHandle) Release() {
 // goroutines.
 func (p *procHandle) Shutdown() {
 	if p.shutdown != nil {
-		p.shutdown()
+		p.shutdown(errors.WithStack(context.Canceled))
 	}
 }
 
@@ -663,7 +663,7 @@ func (p *procHandle) Shutdown() {
 func (p *procHandle) WaitForReady(ctx context.Context) error {
 	select {
 	case <-ctx.Done():
-		return ctx.Err()
+		return context.Cause(ctx)
 	case <-p.ready:
 		return nil
 	}
@@ -673,10 +673,11 @@ func (p *procHandle) WaitForReady(ctx context.Context) error {
 // We wait for up to 10s for the runc pid to be reported.  If the started
 // callback is non-nil it will be called after receiving the pid.
 func (p *procHandle) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error {
-	startedCtx, timeout := context.WithTimeout(ctx, 10*time.Second)
-	defer timeout()
+	ctx, cancel := context.WithCancelCause(ctx)
+	ctx, _ = context.WithTimeoutCause(ctx, 10*time.Second, errors.WithStack(context.DeadlineExceeded))
+	defer cancel(errors.WithStack(context.Canceled))
 	select {
-	case <-startedCtx.Done():
+	case <-ctx.Done():
 		return errors.New("go-runc started message never received")
 	case runcPid, ok := <-startedCh:
 		if !ok {

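The executor changes above switch from plain `WithCancel`/`WithTimeout` to the cause-carrying variants. A standalone sketch (Go 1.21+) of the pattern: the cause attached at creation is what `context.Cause` reports to callers, instead of the bare `context.DeadlineExceeded`.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())
	defer cancel(errors.New("caller finished"))

	// Attach a descriptive cause to the deadline; it surfaces via context.Cause.
	ctx, timeout := context.WithTimeoutCause(ctx, 50*time.Millisecond, errors.New("runc start timed out"))
	defer timeout()

	<-ctx.Done()
	fmt.Println(ctx.Err())          // context deadline exceeded
	fmt.Println(context.Cause(ctx)) // runc start timed out
}
```
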
+ 3 - 3
vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go

@@ -14,13 +14,13 @@ import (
 	"golang.org/x/sync/errgroup"
 )
 
-var unsupportedConsoleError = errors.New("tty for runc is only supported on linux")
+var errUnsupportedConsole = errors.New("tty for runc is only supported on linux")
 
 func updateRuncFieldsForHostOS(runtime *runc.Runc) {}
 
 func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), keep bool) error {
 	if process.Meta.Tty {
-		return unsupportedConsoleError
+		return errUnsupportedConsole
 	}
 	extraArgs := []string{}
 	if keep {
@@ -40,7 +40,7 @@ func (w *runcExecutor) run(ctx context.Context, id, bundle string, process execu
 
 func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo, started func()) error {
 	if process.Meta.Tty {
-		return unsupportedConsoleError
+		return errUnsupportedConsole
 	}
 
 	killer, err := newExecProcKiller(w.runc, id)

+ 38 - 5
vendor/github.com/moby/buildkit/exporter/containerimage/export.go

@@ -12,6 +12,7 @@ import (
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/labels"
 	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/pkg/epoch"
 	"github.com/containerd/containerd/platforms"
@@ -63,9 +64,10 @@ func New(opt Opt) (exporter.Exporter, error) {
 	return im, nil
 }
 
-func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
+func (e *imageExporter) Resolve(ctx context.Context, id int, opt map[string]string) (exporter.ExporterInstance, error) {
 	i := &imageExporterInstance{
 		imageExporter: e,
+		id:            id,
 		opts: ImageCommitOpts{
 			RefCfg: cacheconfig.RefConfig{
 				Compression: compression.New(compression.Default),
@@ -166,6 +168,8 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 
 type imageExporterInstance struct {
 	*imageExporter
+	id int
+
 	opts                 ImageCommitOpts
 	push                 bool
 	pushByDigest         bool
@@ -178,6 +182,10 @@ type imageExporterInstance struct {
 	meta                 map[string][]byte
 }
 
+func (e *imageExporterInstance) ID() int {
+	return e.id
+}
+
 func (e *imageExporterInstance) Name() string {
 	return "exporting to image"
 }
@@ -186,7 +194,8 @@ func (e *imageExporterInstance) Config() *exporter.Config {
 	return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression)
 }
 
-func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) {
+func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, inlineCache exptypes.InlineCache, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) {
+	src = src.Clone()
 	if src.Metadata == nil {
 		src.Metadata = make(map[string][]byte)
 	}
@@ -211,7 +220,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
 		}
 	}()
 
-	desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, &opts)
+	desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, inlineCache, &opts)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -273,6 +282,13 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
 				tagDone(nil)
 
 				if e.unpack {
+					if opts.RewriteTimestamp {
+						// e.unpackImage cannot be used because src ref does not point to the rewritten image
+					//
+						// TODO: change e.unpackImage so that it takes Result[Remote] as parameter.
+						// https://github.com/moby/buildkit/pull/4057#discussion_r1324106088
+						return nil, nil, errors.New("exporter option \"rewrite-timestamp\" conflicts with \"unpack\"")
+					}
 					if err := e.unpackImage(ctx, img, src, session.NewGroup(sessionID)); err != nil {
 						return nil, nil, err
 					}
@@ -284,6 +300,9 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
 						refs = append(refs, src.Ref)
 					}
 					for _, ref := range src.Refs {
+						if ref == nil {
+							continue
+						}
 						refs = append(refs, ref)
 					}
 					eg, ctx := errgroup.WithContext(ctx)
@@ -309,7 +328,18 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
 				}
 			}
 			if e.push {
-				err := e.pushImage(ctx, src, sessionID, targetName, desc.Digest)
+				if opts.RewriteTimestamp {
+					annotations := map[digest.Digest]map[string]string{}
+					addAnnotations(annotations, *desc)
+					// e.pushImage cannot be used because src ref does not point to the rewritten image
+					//
+					// TODO: change e.pushImage so that it takes Result[Remote] as parameter.
+					// https://github.com/moby/buildkit/pull/4057#discussion_r1324106088
+					err = push.Push(ctx, e.opt.SessionManager, sessionID, e.opt.ImageWriter.opt.ContentStore, e.opt.ImageWriter.ContentStore(),
+						desc.Digest, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations)
+				} else {
+					err = e.pushImage(ctx, src, sessionID, targetName, desc.Digest)
+				}
 				if err != nil {
 					return nil, nil, errors.Wrapf(err, "failed to push %v", targetName)
 				}
@@ -339,6 +369,9 @@ func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Sou
 		refs = append(refs, src.Ref)
 	}
 	for _, ref := range src.Refs {
+		if ref == nil {
+			continue
+		}
 		refs = append(refs, ref)
 	}
 
@@ -454,7 +487,7 @@ func getLayers(ctx context.Context, descs []ocispecs.Descriptor, manifest ocispe
 	for i, desc := range descs {
 		layers[i].Diff = ocispecs.Descriptor{
 			MediaType: ocispecs.MediaTypeImageLayer,
-			Digest:    digest.Digest(desc.Annotations["containerd.io/uncompressed"]),
+			Digest:    digest.Digest(desc.Annotations[labels.LabelUncompressed]),
 		}
 		layers[i].Blob = manifest.Layers[i]
 	}

+ 4 - 0
vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go

@@ -72,4 +72,8 @@ var (
 	// Value: int (0-9) for gzip and estargz
 	// Value: int (0-22) for zstd
 	OptKeyCompressionLevel ImageExporterOptKey = "compression-level"
+
+	// Rewrite timestamps in layers to match SOURCE_DATE_EPOCH
+	// Value: bool <true|false>
+	OptKeyRewriteTimestamp ImageExporterOptKey = "rewrite-timestamp"
 )

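A hedged sketch of how the new `rewrite-timestamp` key could be passed through the BuildKit client API; the image name and registry are placeholders, and the surrounding solve plumbing is omitted.

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/client"
)

func main() {
	// Placeholder image name; the option key matches OptKeyRewriteTimestamp above.
	export := client.ExportEntry{
		Type: client.ExporterImage,
		Attrs: map[string]string{
			"name":              "registry.example.com/app:repro",
			"push":              "true",
			"rewrite-timestamp": "true",
		},
	}
	fmt.Printf("%+v\n", export)
}
```
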
+ 7 - 4
vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go

@@ -60,10 +60,13 @@ func ParsePlatforms(meta map[string][]byte) (Platforms, error) {
 	return ps, nil
 }
 
-func ParseKey(meta map[string][]byte, key string, p Platform) []byte {
-	if v, ok := meta[fmt.Sprintf("%s/%s", key, p.ID)]; ok {
-		return v
-	} else if v, ok := meta[key]; ok {
+func ParseKey(meta map[string][]byte, key string, p *Platform) []byte {
+	if p != nil {
+		if v, ok := meta[fmt.Sprintf("%s/%s", key, p.ID)]; ok {
+			return v
+		}
+	}
+	if v, ok := meta[key]; ok {
 		return v
 	}
 	return nil

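A self-contained sketch of the lookup order `ParseKey` now follows: a platform-scoped key wins when a platform is supplied, and passing no platform (now possible with the pointer parameter) falls straight through to the bare key.

```go
package main

import "fmt"

// parseKey mirrors the fallback order of exptypes.ParseKey after the change:
// platform-scoped key first (when a platform is given), then the bare key.
func parseKey(meta map[string][]byte, key string, platformID string) []byte {
	if platformID != "" {
		if v, ok := meta[key+"/"+platformID]; ok {
			return v
		}
	}
	return meta[key]
}

func main() {
	meta := map[string][]byte{
		"image.name":             []byte("app:latest"),
		"image.name/linux/amd64": []byte("app:amd64"),
	}
	fmt.Printf("%s\n", parseKey(meta, "image.name", "linux/amd64")) // app:amd64
	fmt.Printf("%s\n", parseKey(meta, "image.name", ""))            // app:latest
}
```
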
Some files were not shown because too many files changed in this diff