
vendor: github.com/moby/buildkit v0.12.2

The following changes were required:
* integration/build: progressui's signature changed in https://github.com/moby/buildkit/commit/6b8fbed01e7b857e53d5378b04152b583fae670e
* builder-next: flightcontrol.Group has become a generic type in https://github.com/moby/buildkit/commit/8ffc03b8f0426f6426cb25db744dc8976de0dccf (see the sketch after this list)
* builder-next/executor: add github.com/moby/buildkit/executor/resources types, necessitated by https://github.com/moby/buildkit/commit/6e87e4b455cdbf5454bb1db2da859a4d586c828b
* builder-next: stub util/network/Namespace.Sample(), necessitated by https://github.com/moby/buildkit/commit/963f16179f04c9061a8ecdb9c5cc813b99e1e423
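To illustrate the flightcontrol change above, here is a minimal sketch (not part of this commit) of what the generic API looks like to a caller; it assumes the `util/flightcontrol` package path in the vendored tree, and the key and result values are hypothetical:

```go
package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/util/flightcontrol"
)

func main() {
	// flightcontrol.Group is now generic: the type parameter declares the
	// result type of Do, replacing the old (interface{}, error) return.
	var g flightcontrol.Group[string]

	// Concurrent Do calls that share a key are deduplicated into one execution.
	v, err := g.Do(context.Background(), "resolve:example", func(ctx context.Context) (string, error) {
		return "sha256:deadbeef", nil // hypothetical resolved digest
	})
	fmt.Println(v, err)
}
```

Because the closure now returns the concrete type parameter rather than interface{}, call sites such as pull.go below return struct{}{} instead of nil.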

Co-authored-by: CrazyMax <crazy-max@users.noreply.github.com>
Co-authored-by: Sebastiaan van Stijn <github@gone.nl>
Signed-off-by: Bjorn Neergaard <bjorn.neergaard@docker.com>
Bjorn Neergaard committed 2 years ago
commit c217e3c87a
100 changed files with 4,984 additions and 1,418 deletions
  1. builder/builder-next/adapters/containerimage/pull.go (+8 -8)
  2. builder/builder-next/executor_linux.go (+12 -0)
  3. builder/builder-next/executor_nolinux.go (+3 -2)
  4. integration/build/build_traces_test.go (+1 -1)
  5. vendor.mod (+4 -4)
  6. vendor.sum (+14 -10)
  7. vendor/github.com/anchore/go-struct-converter/.bouncer.yaml (+10 -0)
  8. vendor/github.com/anchore/go-struct-converter/.gitignore (+30 -0)
  9. vendor/github.com/anchore/go-struct-converter/.golangci.yaml (+78 -0)
  10. vendor/github.com/anchore/go-struct-converter/CONTRIBUTING.md (+86 -0)
  11. vendor/github.com/anchore/go-struct-converter/LICENSE (+14 -4)
  12. vendor/github.com/anchore/go-struct-converter/Makefile (+81 -0)
  13. vendor/github.com/anchore/go-struct-converter/README.md (+166 -0)
  14. vendor/github.com/anchore/go-struct-converter/chain.go (+95 -0)
  15. vendor/github.com/anchore/go-struct-converter/converter.go (+334 -0)
  16. vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go (+0 -1)
  17. vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go (+464 -166)
  18. vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go (+168 -0)
  19. vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go (+140 -25)
  20. vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/feature.go (+113 -0)
  21. vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go (+98 -5)
  22. vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go (+13 -7)
  23. vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go (+7 -10)
  24. vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go (+64 -0)
  25. vendor/github.com/containerd/typeurl/.gitignore (+0 -2)
  26. vendor/github.com/containerd/typeurl/README.md (+0 -20)
  27. vendor/github.com/containerd/typeurl/doc.go (+0 -83)
  28. vendor/github.com/containerd/typeurl/types.go (+0 -214)
  29. vendor/github.com/moby/buildkit/cache/blobs.go (+32 -31)
  30. vendor/github.com/moby/buildkit/cache/compression_nydus.go (+6 -14)
  31. vendor/github.com/moby/buildkit/cache/contenthash/checksum.go (+2 -0)
  32. vendor/github.com/moby/buildkit/cache/filelist.go (+1 -8)
  33. vendor/github.com/moby/buildkit/cache/manager.go (+6 -6)
  34. vendor/github.com/moby/buildkit/cache/metadata.go (+1 -1)
  35. vendor/github.com/moby/buildkit/cache/metadata/metadata.go (+4 -3)
  36. vendor/github.com/moby/buildkit/cache/refs.go (+92 -30)
  37. vendor/github.com/moby/buildkit/cache/remote.go (+7 -7)
  38. vendor/github.com/moby/buildkit/cache/remotecache/export.go (+136 -31)
  39. vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go (+3 -3)
  40. vendor/github.com/moby/buildkit/cache/remotecache/import.go (+42 -12)
  41. vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go (+2 -2)
  42. vendor/github.com/moby/buildkit/cache/remotecache/local/local.go (+19 -40)
  43. vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go (+68 -49)
  44. vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go (+1 -1)
  45. vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go (+3 -3)
  46. vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go (+6 -6)
  47. vendor/github.com/moby/buildkit/cache/util/fsutil.go (+18 -14)
  48. vendor/github.com/moby/buildkit/client/client.go (+147 -47)
  49. vendor/github.com/moby/buildkit/client/llb/async.go (+2 -2)
  50. vendor/github.com/moby/buildkit/client/llb/definition.go (+18 -6)
  51. vendor/github.com/moby/buildkit/client/llb/diff.go (+3 -1)
  52. vendor/github.com/moby/buildkit/client/llb/exec.go (+6 -1)
  53. vendor/github.com/moby/buildkit/client/llb/fileop.go (+48 -13)
  54. vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go (+7 -7)
  55. vendor/github.com/moby/buildkit/client/llb/merge.go (+26 -1)
  56. vendor/github.com/moby/buildkit/client/llb/meta.go (+26 -1)
  57. vendor/github.com/moby/buildkit/client/llb/resolver.go (+4 -1)
  58. vendor/github.com/moby/buildkit/client/llb/source.go (+26 -2)
  59. vendor/github.com/moby/buildkit/client/llb/sourcemap.go (+16 -2)
  60. vendor/github.com/moby/buildkit/client/llb/state.go (+65 -0)
  61. vendor/github.com/moby/buildkit/client/ociindex/ociindex.go (+32 -15)
  62. vendor/github.com/moby/buildkit/client/solve.go (+1 -1)
  63. vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go (+8 -7)
  64. vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go (+81 -6)
  65. vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go (+13 -2)
  66. vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go (+6 -2)
  67. vendor/github.com/moby/buildkit/control/control.go (+42 -33)
  68. vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go (+19 -18)
  69. vendor/github.com/moby/buildkit/executor/executor.go (+2 -1)
  70. vendor/github.com/moby/buildkit/executor/oci/hosts.go (+2 -2)
  71. vendor/github.com/moby/buildkit/executor/oci/resolvconf.go (+28 -32)
  72. vendor/github.com/moby/buildkit/executor/oci/spec.go (+14 -7)
  73. vendor/github.com/moby/buildkit/executor/oci/spec_unix.go (+33 -0)
  74. vendor/github.com/moby/buildkit/executor/oci/spec_windows.go (+24 -0)
  75. vendor/github.com/moby/buildkit/executor/resources/cpu.go (+141 -0)
  76. vendor/github.com/moby/buildkit/executor/resources/io.go (+117 -0)
  77. vendor/github.com/moby/buildkit/executor/resources/memory.go (+159 -0)
  78. vendor/github.com/moby/buildkit/executor/resources/monitor.go (+287 -0)
  79. vendor/github.com/moby/buildkit/executor/resources/monitor_linux.go (+15 -0)
  80. vendor/github.com/moby/buildkit/executor/resources/monitor_nolinux.go (+8 -0)
  81. vendor/github.com/moby/buildkit/executor/resources/pids.go (+45 -0)
  82. vendor/github.com/moby/buildkit/executor/resources/sampler.go (+139 -0)
  83. vendor/github.com/moby/buildkit/executor/resources/sys.go (+9 -0)
  84. vendor/github.com/moby/buildkit/executor/resources/sys_linux.go (+93 -0)
  85. vendor/github.com/moby/buildkit/executor/resources/sys_nolinux.go (+9 -0)
  86. vendor/github.com/moby/buildkit/executor/resources/types/systypes.go (+72 -0)
  87. vendor/github.com/moby/buildkit/executor/resources/types/types.go (+104 -0)
  88. vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go (+277 -86)
  89. vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go (+34 -18)
  90. vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go (+39 -23)
  91. vendor/github.com/moby/buildkit/executor/stubs.go (+7 -6)
  92. vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go (+5 -5)
  93. vendor/github.com/moby/buildkit/exporter/containerimage/export.go (+83 -75)
  94. vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go (+75 -0)
  95. vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go (+0 -3)
  96. vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go (+4 -4)
  97. vendor/github.com/moby/buildkit/exporter/containerimage/opts.go (+19 -48)
  98. vendor/github.com/moby/buildkit/exporter/containerimage/writer.go (+36 -88)
  99. vendor/github.com/moby/buildkit/exporter/exptypes/keys.go (+15 -0)
  100. vendor/github.com/moby/buildkit/exporter/local/export.go (+41 -29)

+ 8 - 8
builder/builder-next/adapters/containerimage/pull.go

@@ -63,7 +63,7 @@ type SourceOpt struct {
 // Source is the source implementation for accessing container images
 type Source struct {
 	SourceOpt
-	g flightcontrol.Group
+	g flightcontrol.Group[interface{}]
 }
 
 // NewSource creates a new image source
@@ -187,7 +187,7 @@ func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session
 type puller struct {
 	is               *Source
 	resolveLocalOnce sync.Once
-	g                flightcontrol.Group
+	g                flightcontrol.Group[struct{}]
 	src              *source.ImageIdentifier
 	desc             ocispec.Descriptor
 	ref              string
@@ -258,7 +258,7 @@ func (p *puller) resolveLocal() {
 }
 
 func (p *puller) resolve(ctx context.Context, g session.Group) error {
-	_, err := p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) {
+	_, err := p.g.Do(ctx, "", func(ctx context.Context) (_ struct{}, err error) {
 		resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String())
 		defer func() {
 			resolveProgressDone(err)
@@ -266,13 +266,13 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error {
 
 		ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String())
 		if err != nil {
-			return nil, err
+			return struct{}{}, err
 		}
 
 		if p.desc.Digest == "" && p.config == nil {
 			origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String())
 			if err != nil {
-				return nil, err
+				return struct{}{}, err
 			}
 
 			p.desc = desc
@@ -287,16 +287,16 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error {
 		if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
 			ref, err := distreference.WithDigest(ref, p.desc.Digest)
 			if err != nil {
-				return nil, err
+				return struct{}{}, err
 			}
 			_, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g)
 			if err != nil {
-				return nil, err
+				return struct{}{}, err
 			}
 
 			p.config = dt
 		}
-		return nil, nil
+		return struct{}{}, nil
 	})
 	return err
 }

+ 12 - 0
builder/builder-next/executor_linux.go

@@ -14,6 +14,7 @@ import (
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor/oci"
+	"github.com/moby/buildkit/executor/resources"
 	"github.com/moby/buildkit/executor/runcexecutor"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/solver/pb"
@@ -49,6 +50,11 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi
 		pidmap = nil
 	}
 
+	rm, err := resources.NewMonitor()
+	if err != nil {
+		return nil, err
+	}
+
 	return runcexecutor.New(runcexecutor.Opt{
 		Root:                filepath.Join(root, "executor"),
 		CommandCandidates:   []string{"runc"},
@@ -58,6 +64,7 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi
 		IdentityMapping:     pidmap,
 		DNS:                 dnsConfig,
 		ApparmorProfile:     apparmorProfile,
+		ResourceMonitor:     rm,
 	}, networkProviders)
 }
 
@@ -119,6 +126,11 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n *libnetwork.Network)
 	iface.ep = ep
 }
 
+// TODO(neersighted): Unstub Sample(), and collect data from the libnetwork Endpoint.
+func (iface *lnInterface) Sample() (*network.Sample, error) {
+	return &network.Sample{}, nil
+}
+
 func (iface *lnInterface) Set(s *specs.Spec) error {
 	<-iface.ready
 	if iface.err != nil {

+ 3 - 2
builder/builder-next/executor_nolinux.go

@@ -12,6 +12,7 @@ import (
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor/oci"
+	resourcetypes "github.com/moby/buildkit/executor/resources/types"
 )
 
 func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool, _ idtools.IdentityMapping, _ string) (executor.Executor, error) {
@@ -20,8 +21,8 @@ func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool
 
 type stubExecutor struct{}
 
-func (w *stubExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) {
-	return errors.New("buildkit executor not implemented for "+runtime.GOOS)
+func (w *stubExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (resourcetypes.Recorder, error) {
+	return nil, errors.New("buildkit executor not implemented for "+runtime.GOOS)
 }
 
 func (w *stubExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) error {

+ 1 - 1
integration/build/build_traces_test.go

@@ -57,7 +57,7 @@ func TestBuildkitHistoryTracePropagation(t *testing.T) {
 	}()
 
 	eg.Go(func() error {
-		_, err := progressui.DisplaySolveStatus(ctxGo, "test", nil, &testWriter{t}, ch)
+		_, err := progressui.DisplaySolveStatus(ctxGo, nil, &testWriter{t}, ch, progressui.WithPhase("test"))
 		return err
 	})
 

+ 4 - 4
vendor.mod

@@ -61,7 +61,7 @@ require (
 	github.com/miekg/dns v1.1.43
 	github.com/mistifyio/go-zfs/v3 v3.0.1
 	github.com/mitchellh/copystructure v1.2.0
-	github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54 // v0.11 branch
+	github.com/moby/buildkit v0.12.2
 	github.com/moby/ipvs v1.1.0
 	github.com/moby/locker v1.0.1
 	github.com/moby/patternmatcher v0.6.0
@@ -116,6 +116,7 @@ require (
 	cloud.google.com/go/longrunning v0.4.1 // indirect
 	github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
 	github.com/agext/levenshtein v1.2.3 // indirect
+	github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
 	github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect
@@ -134,10 +135,9 @@ require (
 	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/go-cni v1.1.9 // indirect
 	github.com/containerd/go-runc v1.1.0 // indirect
-	github.com/containerd/nydus-snapshotter v0.3.1 // indirect
+	github.com/containerd/nydus-snapshotter v0.8.2 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
 	github.com/containerd/ttrpc v1.2.2 // indirect
-	github.com/containerd/typeurl v1.0.2 // indirect
 	github.com/containernetworking/cni v1.1.2 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
@@ -177,7 +177,7 @@ require (
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
 	github.com/shibumi/go-pathspec v1.3.0 // indirect
-	github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f // indirect
+	github.com/spdx/tools-golang v0.5.1 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tinylib/msgp v1.1.8 // indirect
 	github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb // indirect

+ 14 - 10
vendor.sum

@@ -117,8 +117,10 @@ github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae
 github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
 github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0 h1:cOjLyhBhe91glgZZNbQUg9BJC57l6BiSKov0Ivv7k0U=
 github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0/go.mod h1:fBaQWrftOD5CrVCUfoYGHs4X4VViTuGOXA8WloCjTY0=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
 github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver/v3 v3.1.0 h1:Y2lUDsFKVRSYGojLJ1yLxSXdMmMYTYls0rCvoqmMUQk=
 github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
@@ -155,6 +157,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc=
+github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@@ -326,8 +330,8 @@ github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHr
 github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
 github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gGleA=
 github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U=
-github.com/containerd/nydus-snapshotter v0.3.1 h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk=
-github.com/containerd/nydus-snapshotter v0.3.1/go.mod h1:+8R7NX7vrjlxAgtidnsstwIhpzyTlriYPssTxH++uiM=
+github.com/containerd/nydus-snapshotter v0.8.2 h1:7SOrMU2YmLzfbsr5J7liMZJlNi5WT6vtIOxLGv+iz7E=
+github.com/containerd/nydus-snapshotter v0.8.2/go.mod h1:UJILTN5LVBRY+dt8BGJbp72Xy729hUZsOugObEI3/O8=
 github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4=
 github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
 github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
@@ -337,8 +341,6 @@ github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtO
 github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
 github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
 github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
-github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
-github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
 github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
 github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
 github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
@@ -626,7 +628,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -906,8 +907,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx
 github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs=
 github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
-github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54 h1:LSh03Csyx/zQq8MreC9MYMQE/+5EkohwZMvXSS6kMZo=
-github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54/go.mod h1:bMQDryngJKGvJ/ZuRFhrejurbvYSv3NkGCheQ59X4AM=
+github.com/moby/buildkit v0.12.2 h1:B7guBgY6sfk4dBlv/ORUxyYlp0UojYaYyATgtNwSCXc=
+github.com/moby/buildkit v0.12.2/go.mod h1:adB4y0SxxX8trnrY+oEulb48ODLqPO6pKMF0ppGcCoI=
 github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ=
 github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
@@ -1159,8 +1160,8 @@ github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34c
 github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM=
-github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f h1:9B623Cfs+mclYK6dsae7gLSwuIBHvlgmEup87qpqsAQ=
-github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f/go.mod h1:VHzvNsKAfAGqs4ZvwRL+7a0dNsL20s7lGui4K9C0xQM=
+github.com/spdx/tools-golang v0.5.1 h1:fJg3SVOGG+eIva9ZUBm/hvyA7PIPVFjRxUKe6fdAgwE=
+github.com/spdx/tools-golang v0.5.1/go.mod h1:/DRDQuBfB37HctM29YtrX1v+bXiVmT2OpQDalRmX9aU=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
@@ -1201,7 +1202,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1951,6 +1953,8 @@ k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
 k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js=
 k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+kernel.org/pub/linux/libs/security/libcap/cap v1.2.67 h1:sPQ9qlSNR26fToTKbxe/HDWJlXvBLqGmt84LGCQkOy0=
+kernel.org/pub/linux/libs/security/libcap/psx v1.2.67 h1:NxbXJ7pDVq0FKBsqjieT92QDXI2XaqH2HAi4QcCOHt8=
 modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
 modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
 modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=

+ 10 - 0
vendor/github.com/anchore/go-struct-converter/.bouncer.yaml

@@ -0,0 +1,10 @@
+permit:
+  - BSD.*
+  - CC0.*
+  - MIT.*
+  - Apache.*
+  - MPL.*
+  - ISC
+  - WTFPL
+
+ignore-packages:

+ 30 - 0
vendor/github.com/anchore/go-struct-converter/.gitignore

@@ -0,0 +1,30 @@
+# If you prefer the allow list template instead of the deny list, see community template:
+# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
+#
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
+
+# Go workspace file
+go.work
+
+# tools
+.tmp
+
+# test output
+test/results
+
+# IDE project files
+.idea

+ 78 - 0
vendor/github.com/anchore/go-struct-converter/.golangci.yaml

@@ -0,0 +1,78 @@
+#issues:
+#  # The list of ids of default excludes to include or disable.
+#  include:
+#    - EXC0002 # disable excluding of issues about comments from golint
+
+linters:
+  # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint
+  disable-all: true
+  enable:
+    - asciicheck
+    - bodyclose
+    - depguard
+    - dogsled
+    - dupl
+    - errcheck
+    - exportloopref
+    - funlen
+    - gocognit
+    - goconst
+    - gocritic
+    - gocyclo
+    - gofmt
+    - goprintffuncname
+    - gosec
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - nakedret
+    - nolintlint
+    - revive
+    - staticcheck
+    - stylecheck
+    - typecheck
+    - unconvert
+    - unparam
+    - unused
+    - whitespace
+
+# do not enable...
+#    - gochecknoglobals
+#    - gochecknoinits    # this is too aggressive
+#    - rowserrcheck disabled per generics https://github.com/golangci/golangci-lint/issues/2649
+#    - godot
+#    - godox
+#    - goerr113
+#    - goimports   # we're using gosimports now instead to account for extra whitespaces (see https://github.com/golang/go/issues/20818)
+#    - golint      # deprecated
+#    - gomnd       # this is too aggressive
+#    - interfacer  # this is a good idea, but is no longer supported and is prone to false positives
+#    - lll         # without a way to specify per-line exception cases, this is not usable
+#    - maligned    # this is an excellent linter, but tricky to optimize and we are not sensitive to memory layout optimizations
+#    - nestif
+#    - prealloc    # following this rule isn't consistently a good idea, as it sometimes forces unnecessary allocations that result in less idiomatic code
+#    - scopelint   # deprecated
+#    - testpackage
+#    - wsl         # this doens't have an auto-fixer yet and is pretty noisy (https://github.com/bombsimon/wsl/issues/90)
+
+linters-settings:
+  funlen:
+    # Checks the number of lines in a function.
+    # If lower than 0, disable the check.
+    # Default: 60
+    lines: 140
+    # Checks the number of statements in a function.
+    # If lower than 0, disable the check.
+    # Default: 40
+    statements: 100
+
+  gocognit:
+    # Minimal code complexity to report
+    # Default: 30 (but we recommend 10-20)
+    min-complexity: 80
+
+  gocyclo:
+    # Minimal code complexity to report.
+    # Default: 30 (but we recommend 10-20)
+    min-complexity: 50

+ 86 - 0
vendor/github.com/anchore/go-struct-converter/CONTRIBUTING.md

@@ -0,0 +1,86 @@
+# Contributing to go-struct-converter
+
+If you are looking to contribute to this project and want to open a GitHub pull request ("PR"), there are a few guidelines of what we are looking for in patches. Make sure you go through this document and ensure that your code proposal is aligned.
+
+## Sign off your work
+
+The `sign-off` is an added line at the end of the explanation for the commit, certifying that you wrote it or otherwise have the right to submit it as an open-source patch. By submitting a contribution, you agree to be bound by the terms of the DCO Version 1.1 and Apache License Version 2.0.
+
+Signing off a commit certifies the below Developer's Certificate of Origin (DCO):
+
+```text
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+   (a) The contribution was created in whole or in part by me and I
+       have the right to submit it under the open source license
+       indicated in the file; or
+
+   (b) The contribution is based upon previous work that, to the best
+       of my knowledge, is covered under an appropriate open source
+       license and I have the right under that license to submit that
+       work with modifications, whether created in whole or in part
+       by me, under the same open source license (unless I am
+       permitted to submit under a different license), as indicated
+       in the file; or
+
+   (c) The contribution was provided directly to me by some other
+       person who certified (a), (b) or (c) and I have not modified
+       it.
+
+   (d) I understand and agree that this project and the contribution
+       are public and that a record of the contribution (including all
+       personal information I submit with it, including my sign-off) is
+       maintained indefinitely and may be redistributed consistent with
+       this project or the open source license(s) involved.
+```
+
+All contributions to this project are licensed under the [Apache License Version 2.0, January 2004](http://www.apache.org/licenses/).
+
+When committing your change, you can add the required line manually so that it looks like this:
+
+```text
+Signed-off-by: John Doe <john.doe@example.com>
+```
+
+Alternatively, configure your Git client with your name and email to use the `-s` flag when creating a commit:
+
+```text
+$ git config --global user.name "John Doe"
+$ git config --global user.email "john.doe@example.com"
+```
+
+Creating a signed-off commit is then possible with `-s` or `--signoff`:
+
+```text
+$ git commit -s -m "this is a commit message"
+```
+
+To double-check that the commit was signed-off, look at the log output:
+
+```text
+$ git log -1
+commit 37ceh170e4hb283bb73d958f2036ee5k07e7fde7 (HEAD -> issue-35, origin/main, main)
+Author: John Doe <john.doe@example.com>
+Date:   Mon Aug 1 11:27:13 2020 -0400
+
+    this is a commit message
+
+    Signed-off-by: John Doe <john.doe@example.com>
+```
+
+[//]: # "TODO: Commit guidelines, granular commits"
+[//]: # "TODO: Commit guidelines, descriptive messages"
+[//]: # "TODO: Commit guidelines, commit title, extra body description"
+[//]: # "TODO: PR title and description"
+
+## Test your changes
+
+Ensure that your changes have passed the test suite.
+
+Simply run `make test` to have all tests run and validate changes work properly.
+
+## Document your changes
+
+When proposed changes are modifying user-facing functionality or output, it is expected the PR will include updates to the documentation as well.

+ 14 - 4
vendor/github.com/containerd/typeurl/LICENSE → vendor/github.com/anchore/go-struct-converter/LICENSE

@@ -1,7 +1,6 @@
-
                                  Apache License
                            Version 2.0, January 2004
-                        https://www.apache.org/licenses/
+                        http://www.apache.org/licenses/
 
    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 
@@ -176,13 +175,24 @@
 
    END OF TERMS AND CONDITIONS
 
-   Copyright The containerd Authors
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at
 
-       https://www.apache.org/licenses/LICENSE-2.0
+       http://www.apache.org/licenses/LICENSE-2.0
 
    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,

+ 81 - 0
vendor/github.com/anchore/go-struct-converter/Makefile

@@ -0,0 +1,81 @@
+TEMPDIR = ./.tmp
+
+# commands and versions
+LINTCMD = $(TEMPDIR)/golangci-lint run --tests=false --timeout=5m --config .golangci.yaml
+GOIMPORTS_CMD = $(TEMPDIR)/gosimports -local github.com/anchore
+
+# tool versions
+GOLANGCILINT_VERSION = v1.50.1
+GOSIMPORTS_VERSION = v0.3.4
+BOUNCER_VERSION = v0.4.0
+
+# formatting variables
+BOLD := $(shell tput -T linux bold)
+PURPLE := $(shell tput -T linux setaf 5)
+GREEN := $(shell tput -T linux setaf 2)
+CYAN := $(shell tput -T linux setaf 6)
+RED := $(shell tput -T linux setaf 1)
+RESET := $(shell tput -T linux sgr0)
+TITLE := $(BOLD)$(PURPLE)
+SUCCESS := $(BOLD)$(GREEN)
+
+# test variables
+RESULTSDIR = test/results
+COVER_REPORT = $(RESULTSDIR)/unit-coverage-details.txt
+COVER_TOTAL = $(RESULTSDIR)/unit-coverage-summary.txt
+# the quality gate lower threshold for unit test total % coverage (by function statements)
+COVERAGE_THRESHOLD := 80
+
+$(RESULTSDIR):
+	mkdir -p $(RESULTSDIR)
+
+$(TEMPDIR):
+	mkdir -p $(TEMPDIR)
+
+.PHONY: bootstrap-tools
+bootstrap-tools: $(TEMPDIR)
+	curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(TEMPDIR)/ $(GOLANGCILINT_VERSION)
+	curl -sSfL https://raw.githubusercontent.com/wagoodman/go-bouncer/master/bouncer.sh | sh -s -- -b $(TEMPDIR)/ $(BOUNCER_VERSION)
+	# the only difference between goimports and gosimports is that gosimports removes extra whitespace between import blocks (see https://github.com/golang/go/issues/20818)
+	GOBIN="$(realpath $(TEMPDIR))" go install github.com/rinchsan/gosimports/cmd/gosimports@$(GOSIMPORTS_VERSION)
+
+.PHONY: static-analysis
+static-analysis: check-licenses lint
+
+.PHONY: lint
+lint: ## Run gofmt + golangci lint checks
+	$(call title,Running linters)
+	# ensure there are no go fmt differences
+	@printf "files with gofmt issues: [$(shell gofmt -l -s .)]\n"
+	@test -z "$(shell gofmt -l -s .)"
+
+	# run all golangci-lint rules
+	$(LINTCMD)
+	@[ -z "$(shell $(GOIMPORTS_CMD) -d .)" ] || (echo "goimports needs to be fixed" && false)
+
+	# go tooling does not play well with certain filename characters, ensure the common cases don't result in future "go get" failures
+	$(eval MALFORMED_FILENAMES := $(shell find . | grep -e ':'))
+	@bash -c "[[ '$(MALFORMED_FILENAMES)' == '' ]] || (printf '\nfound unsupported filename characters:\n$(MALFORMED_FILENAMES)\n\n' && false)"
+
+.PHONY: lint-fix
+lint-fix: ## Auto-format all source code + run golangci lint fixers
+	$(call title,Running lint fixers)
+	gofmt -w -s .
+	$(GOIMPORTS_CMD) -w .
+	$(LINTCMD) --fix
+	go mod tidy
+
+.PHONY: check-licenses
+check-licenses: ## Ensure transitive dependencies are compliant with the current license policy
+	$(TEMPDIR)/bouncer check ./...
+
+.PHONY: unit
+unit: $(RESULTSDIR) ## Run unit tests (with coverage)
+	$(call title,Running unit tests)
+	go test  -coverprofile $(COVER_REPORT) $(shell go list ./... | grep -v anchore/syft/test)
+	@go tool cover -func $(COVER_REPORT) | grep total |  awk '{print substr($$3, 1, length($$3)-1)}' > $(COVER_TOTAL)
+	@echo "Coverage: $$(cat $(COVER_TOTAL))"
+	@if [ $$(echo "$$(cat $(COVER_TOTAL)) >= $(COVERAGE_THRESHOLD)" | bc -l) -ne 1 ]; then echo "$(RED)$(BOLD)Failed coverage quality gate (> $(COVERAGE_THRESHOLD)%)$(RESET)" && false; fi
+
+.PHONY: test
+test: unit

+ 166 - 0
vendor/github.com/anchore/go-struct-converter/README.md

@@ -0,0 +1,166 @@
+# Go `struct` Converter
+
+A library for converting between Go structs.
+
+```go
+chain := converter.NewChain(V1{}, V2{}, V3{})
+
+chain.Convert(myV1struct, &myV3struct)
+```
+
+## Details
+
+At its core, this library provides a `Convert` function, which automatically
+handles converting fields with the same name, and "convertable"
+types. Some examples are:
+* `string` -> `string`
+* `string` -> `*string`
+* `int` -> `string`
+* `string` -> `[]string`
+
+The automatic conversions are implemented when there is an obvious way
+to convert between the types. A lot more automatic conversions happen
+-- see [the converter tests](converter_test.go) for a more comprehensive
+list of what is currently supported.
+
+Not everything can be handled automatically, however, so there is also
+a `ConvertFrom` interface any struct in the graph can implement to
+perform custom conversion, similar to how the stdlib `MarshalJSON` and
+`UnmarshalJSON` would be implemented.
+
+Additionally, and maybe most importantly, there is a `converter.Chain` available,
+which orchestrates conversions between _multiple versions_ of structs. This could
+be thought of similar to database migrations: given a starting struct and a target
+struct, the `chain.Convert` function iterates through every intermediary migration
+in order to arrive at the target struct.
+
+## Basic Usage
+
+To illustrate usage we'll start with a few basic structs, some of which
+implement the `ConvertFrom` interface due to breaking changes:
+
+```go
+// --------- V1 struct definition below ---------
+
+type V1 struct {
+  Name     string
+  OldField string
+}
+
+// --------- V2 struct definition below ---------
+
+type V2 struct {
+  Name     string
+  NewField string // this was a renamed field
+}
+
+func (to *V2) ConvertFrom(from interface{}) error {
+  if from, ok := from.(V1); ok { // forward migration
+    to.NewField = from.OldField
+  }
+  return nil
+}
+
+// --------- V3 struct definition below ---------
+
+type V3 struct {
+  Name       []string
+  FinalField []string // this field was renamed and the type was changed
+}
+
+func (to *V3) ConvertFrom(from interface{}) error {
+  if from, ok := from.(V2); ok { // forward migration
+    to.FinalField = []string{from.NewField}
+  }
+  return nil
+}
+```
+
+Given these type definitions, we can easily set up a conversion chain
+like this:
+
+```go
+chain := converter.NewChain(V1{}, V2{}, V3{})
+```
+
+This chain can then be used to convert from an _older version_ to a _newer 
+version_. This is because our `ConvertFrom` definitions are only handling
+_forward_ migrations.
+
+This chain can be used to convert from a `V1` struct to a `V3` struct easily,
+like this:
+
+```go
+v1 := // somehow get a populated v1 struct
+v3 := V3{}
+chain.Convert(v1, &v3)
+```
+
+Since we've defined our chain as `V1` &rarr; `V2` &rarr; `V3`, the chain will execute
+conversions to all intermediary structs (`V2`, in this case) and ultimately end
+when we've populated the `v3` instance.
+
+Note we haven't needed to define any conversions on the `Name` field of any structs
+since this one is convertible between structs: `string` &rarr; `string` &rarr; `[]string`.
+
+## Backwards Migrations
+
+If we wanted to _also_ provide backwards migrations, we could also easily add a case
+to the `ConvertFrom` methods. The whole set of structs would look something like this:
+
+
+```go
+// --------- V1 struct definition below ---------
+
+type V1 struct {
+  Name     string
+  OldField string
+}
+
+func (to *V1) ConvertFrom(from interface{}) error {
+  if from, ok := from.(V2); ok { // backward migration
+    to.OldField = from.NewField
+  }
+  return nil
+}
+
+// --------- V2 struct definition below ---------
+
+type V2 struct {
+  Name     string
+  NewField string
+}
+
+func (to *V2) ConvertFrom(from interface{}) error {
+  if from, ok := from.(V1); ok { // forward migration
+    to.NewField = from.OldField
+  }
+  if from, ok := from.(V3); ok { // backward migration
+    to.NewField = from.FinalField[0]
+  }
+  return nil
+}
+
+// --------- V3 struct definition below ---------
+
+type V3 struct {
+  Name       []string
+  FinalField []string
+}
+
+func (to *V3) ConvertFrom(from interface{}) error {
+  if from, ok := from.(V2); ok { // forward migration
+    to.FinalField = []string{from.NewField}
+  }
+  return nil
+}
+```
+
+At this point we could convert in either direction, for example a 
+`V3` struct could convert to a `V1` struct, with the caveat that there
+may be data loss, as might need to happen due to changes in the data shapes.
+
+## Contributing
+
+If you would like to contribute to this repository, please see the
+[CONTRIBUTING.md](CONTRIBUTING.md).

+ 95 - 0
vendor/github.com/anchore/go-struct-converter/chain.go

@@ -0,0 +1,95 @@
+package converter
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// NewChain takes a set of structs, in order, to allow for accurate chain.Convert(from, &to) calls. NewChain should
+// be called with struct values in a manner similar to this:
+// converter.NewChain(v1.Document{}, v2.Document{}, v3.Document{})
+func NewChain(structs ...interface{}) Chain {
+	out := Chain{}
+	for _, s := range structs {
+		typ := reflect.TypeOf(s)
+		if isPtr(typ) { // these shouldn't be pointers, but check just to be safe
+			typ = typ.Elem()
+		}
+		out.Types = append(out.Types, typ)
+	}
+	return out
+}
+
+// Chain holds a set of types with which to migrate through when a `chain.Convert` call is made
+type Chain struct {
+	Types []reflect.Type
+}
+
+// Convert converts from one type in the chain to the target type, calling each conversion in between
+func (c Chain) Convert(from interface{}, to interface{}) (err error) {
+	fromValue := reflect.ValueOf(from)
+	fromType := fromValue.Type()
+
+	// handle incoming pointers
+	for isPtr(fromType) {
+		fromValue = fromValue.Elem()
+		fromType = fromType.Elem()
+	}
+
+	toValuePtr := reflect.ValueOf(to)
+	toTypePtr := toValuePtr.Type()
+
+	if !isPtr(toTypePtr) {
+		return fmt.Errorf("TO struct provided not a pointer, unable to set values: %v", to)
+	}
+
+	// toValue must be a pointer but need a reference to the struct type directly
+	toValue := toValuePtr.Elem()
+	toType := toValue.Type()
+
+	fromIdx := -1
+	toIdx := -1
+
+	for i, typ := range c.Types {
+		if typ == fromType {
+			fromIdx = i
+		}
+		if typ == toType {
+			toIdx = i
+		}
+	}
+
+	if fromIdx == -1 {
+		return fmt.Errorf("invalid FROM type provided, not in the conversion chain: %s", fromType.Name())
+	}
+
+	if toIdx == -1 {
+		return fmt.Errorf("invalid TO type provided, not in the conversion chain: %s", toType.Name())
+	}
+
+	last := from
+	for i := fromIdx; i != toIdx; {
+		// skip the first index, because that is the from type - start with the next conversion in the chain
+		if fromIdx < toIdx {
+			i++
+		} else {
+			i--
+		}
+
+		var next interface{}
+		if i == toIdx {
+			next = to
+		} else {
+			nextVal := reflect.New(c.Types[i])
+			next = nextVal.Interface() // this will be a pointer, which is fine to pass to both from and to in Convert
+		}
+
+		if err = Convert(last, next); err != nil {
+			return err
+		}
+
+		last = next
+	}
+
+	return nil
+}

+ 334 - 0
vendor/github.com/anchore/go-struct-converter/converter.go

@@ -0,0 +1,334 @@
+package converter
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// ConvertFrom interface allows structs to define custom conversion functions if the automated reflection-based Convert
+// is not able to convert properties due to name changes or other factors.
+type ConvertFrom interface {
+	ConvertFrom(interface{}) error
+}
+
+// Convert takes two objects, e.g. v2_1.Document and &v2_2.Document{} and attempts to map all the properties from one
+// to the other. After the automatic mapping, if a struct implements the ConvertFrom interface, this is called to
+// perform any additional conversion logic necessary.
+func Convert(from interface{}, to interface{}) error {
+	fromValue := reflect.ValueOf(from)
+
+	toValuePtr := reflect.ValueOf(to)
+	toTypePtr := toValuePtr.Type()
+
+	if !isPtr(toTypePtr) {
+		return fmt.Errorf("TO value provided was not a pointer, unable to set value: %v", to)
+	}
+
+	toValue, err := getValue(fromValue, toTypePtr)
+	if err != nil {
+		return err
+	}
+
+	// don't set nil values
+	if toValue == nilValue {
+		return nil
+	}
+
+	// toValuePtr is the passed-in pointer, toValue is also the same type of pointer
+	toValuePtr.Elem().Set(toValue.Elem())
+	return nil
+}
+
+func getValue(fromValue reflect.Value, targetType reflect.Type) (reflect.Value, error) {
+	var err error
+
+	fromType := fromValue.Type()
+
+	var toValue reflect.Value
+
+	// handle incoming pointer Types
+	if isPtr(fromType) {
+		if fromValue.IsNil() {
+			return nilValue, nil
+		}
+		fromValue = fromValue.Elem()
+		if !fromValue.IsValid() || fromValue.IsZero() {
+			return nilValue, nil
+		}
+		fromType = fromValue.Type()
+	}
+
+	baseTargetType := targetType
+	if isPtr(targetType) {
+		baseTargetType = targetType.Elem()
+	}
+
+	switch {
+	case isStruct(fromType) && isStruct(baseTargetType):
+		// this always creates a pointer type
+		toValue = reflect.New(baseTargetType)
+		toValue = toValue.Elem()
+
+		for i := 0; i < fromType.NumField(); i++ {
+			fromField := fromType.Field(i)
+			fromFieldValue := fromValue.Field(i)
+
+			toField, exists := baseTargetType.FieldByName(fromField.Name)
+			if !exists {
+				continue
+			}
+			toFieldType := toField.Type
+
+			toFieldValue := toValue.FieldByName(toField.Name)
+
+			newValue, err := getValue(fromFieldValue, toFieldType)
+			if err != nil {
+				return nilValue, err
+			}
+
+			if newValue == nilValue {
+				continue
+			}
+
+			toFieldValue.Set(newValue)
+		}
+
+		// allow structs to implement a custom convert function from previous/next version struct
+		if reflect.PtrTo(baseTargetType).Implements(convertFromType) {
+			convertFrom := toValue.Addr().MethodByName(convertFromName)
+			if !convertFrom.IsValid() {
+				return nilValue, fmt.Errorf("unable to get ConvertFrom method")
+			}
+			args := []reflect.Value{fromValue}
+			out := convertFrom.Call(args)
+			err := out[0].Interface()
+			if err != nil {
+				return nilValue, fmt.Errorf("an error occurred calling %s.%s: %v", baseTargetType.Name(), convertFromName, err)
+			}
+		}
+	case isSlice(fromType) && isSlice(baseTargetType):
+		if fromValue.IsNil() {
+			return nilValue, nil
+		}
+
+		length := fromValue.Len()
+		targetElementType := baseTargetType.Elem()
+		toValue = reflect.MakeSlice(baseTargetType, length, length)
+		for i := 0; i < length; i++ {
+			v, err := getValue(fromValue.Index(i), targetElementType)
+			if err != nil {
+				return nilValue, err
+			}
+			if v.IsValid() {
+				toValue.Index(i).Set(v)
+			}
+		}
+	case isMap(fromType) && isMap(baseTargetType):
+		if fromValue.IsNil() {
+			return nilValue, nil
+		}
+
+		keyType := baseTargetType.Key()
+		elementType := baseTargetType.Elem()
+		toValue = reflect.MakeMap(baseTargetType)
+		for _, fromKey := range fromValue.MapKeys() {
+			fromVal := fromValue.MapIndex(fromKey)
+			k, err := getValue(fromKey, keyType)
+			if err != nil {
+				return nilValue, err
+			}
+			v, err := getValue(fromVal, elementType)
+			if err != nil {
+				return nilValue, err
+			}
+			if k == nilValue || v == nilValue {
+				continue
+			}
+			if k.IsValid() && v.IsValid() {
+				toValue.SetMapIndex(k, v)
+			}
+		}
+	default:
+		// TODO determine if there are other conversions
+		toValue = fromValue
+	}
+
+	// handle non-pointer returns -- the reflect.New earlier always creates a pointer
+	if !isPtr(baseTargetType) {
+		toValue = fromPtr(toValue)
+	}
+
+	toValue, err = convertValueTypes(toValue, baseTargetType)
+
+	if err != nil {
+		return nilValue, err
+	}
+
+	// handle elements which are now pointers
+	if isPtr(targetType) {
+		toValue = toPtr(toValue)
+	}
+
+	return toValue, nil
+}
+
+// convertValueTypes takes a value and a target type, and attempts to convert
+// between the Types - e.g. string -> int. When this function is called, the
+// value and the target type have already been dereferenced to non-pointer types.
+func convertValueTypes(value reflect.Value, targetType reflect.Type) (reflect.Value, error) {
+	typ := value.Type()
+	switch {
+	// if the Types are the same, just return the value
+	case typ.Kind() == targetType.Kind():
+		return value, nil
+	case value.IsZero() && isPrimitive(targetType):
+
+	case isPrimitive(typ) && isPrimitive(targetType):
+		// get a string representation of the value
+		str := fmt.Sprintf("%v", value.Interface()) // TODO is there a better way to get a string representation?
+		var err error
+		var out interface{}
+		switch {
+		case isString(targetType):
+			out = str
+		case isBool(targetType):
+			out, err = strconv.ParseBool(str)
+		case isInt(targetType):
+			out, err = strconv.Atoi(str)
+		case isUint(targetType):
+			out, err = strconv.ParseUint(str, 10, 64)
+		case isFloat(targetType):
+			out, err = strconv.ParseFloat(str, 64)
+		}
+
+		if err != nil {
+			return nilValue, err
+		}
+
+		v := reflect.ValueOf(out)
+
+		v = v.Convert(targetType)
+
+		return v, nil
+	case isSlice(typ) && isSlice(targetType):
+		// this should already be handled in getValue
+	case isSlice(typ):
+		// this may be lossy
+		if value.Len() > 0 {
+			v := value.Index(0)
+			v, err := convertValueTypes(v, targetType)
+			if err != nil {
+				return nilValue, err
+			}
+			return v, nil
+		}
+		return convertValueTypes(nilValue, targetType)
+	case isSlice(targetType):
+		elementType := targetType.Elem()
+		v, err := convertValueTypes(value, elementType)
+		if err != nil {
+			return nilValue, err
+		}
+		if v == nilValue {
+			return v, nil
+		}
+		slice := reflect.MakeSlice(targetType, 1, 1)
+		slice.Index(0).Set(v)
+		return slice, nil
+	}
+
+	return nilValue, fmt.Errorf("unable to convert from: %v to %v", value.Interface(), targetType.Name())
+}
+
+func isPtr(typ reflect.Type) bool {
+	return typ.Kind() == reflect.Ptr
+}
+
+func isPrimitive(typ reflect.Type) bool {
+	return isString(typ) || isBool(typ) || isInt(typ) || isUint(typ) || isFloat(typ)
+}
+
+func isString(typ reflect.Type) bool {
+	return typ.Kind() == reflect.String
+}
+
+func isBool(typ reflect.Type) bool {
+	return typ.Kind() == reflect.Bool
+}
+
+func isInt(typ reflect.Type) bool {
+	switch typ.Kind() {
+	case reflect.Int,
+		reflect.Int8,
+		reflect.Int16,
+		reflect.Int32,
+		reflect.Int64:
+		return true
+	}
+	return false
+}
+
+func isUint(typ reflect.Type) bool {
+	switch typ.Kind() {
+	case reflect.Uint,
+		reflect.Uint8,
+		reflect.Uint16,
+		reflect.Uint32,
+		reflect.Uint64:
+		return true
+	}
+	return false
+}
+
+func isFloat(typ reflect.Type) bool {
+	switch typ.Kind() {
+	case reflect.Float32,
+		reflect.Float64:
+		return true
+	}
+	return false
+}
+
+func isStruct(typ reflect.Type) bool {
+	return typ.Kind() == reflect.Struct
+}
+
+func isSlice(typ reflect.Type) bool {
+	return typ.Kind() == reflect.Slice
+}
+
+func isMap(typ reflect.Type) bool {
+	return typ.Kind() == reflect.Map
+}
+
+func toPtr(val reflect.Value) reflect.Value {
+	typ := val.Type()
+	if !isPtr(typ) {
+		// this creates a pointer type inherently
+		ptrVal := reflect.New(typ)
+		ptrVal.Elem().Set(val)
+		val = ptrVal
+	}
+	return val
+}
+
+func fromPtr(val reflect.Value) reflect.Value {
+	if isPtr(val.Type()) {
+		val = val.Elem()
+	}
+	return val
+}
+
+// convertFromName constant to find the ConvertFrom method
+const convertFromName = "ConvertFrom"
+
+var (
+	// nilValue is returned in a number of cases when a value should not be set
+	nilValue = reflect.ValueOf(nil)
+
+	// convertFromType is the type to check for ConvertFrom implementations
+	convertFromType = reflect.TypeOf((*ConvertFrom)(nil)).Elem()
+)
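
A hedged usage sketch of `Convert` and the `ConvertFrom` hook defined above — the `OldDoc`/`NewDoc` types are hypothetical, and the string-to-int coercion relies on the `strconv` fallback in `convertValueTypes`:

```go
package main

import (
	"fmt"

	converter "github.com/anchore/go-struct-converter"
)

// Hypothetical schema versions for illustration only.
type OldDoc struct {
	Title string
	Count string // stored as a string in the old schema
}

type NewDoc struct {
	Title string
	Count int // Convert coerces "42" -> 42 via strconv
	Note  string
}

// ConvertFrom runs after the automatic field mapping, covering what
// reflection alone cannot fill in.
func (d *NewDoc) ConvertFrom(from interface{}) error {
	if old, ok := from.(OldDoc); ok {
		d.Note = "migrated from: " + old.Title
	}
	return nil
}

func main() {
	var to NewDoc
	if err := converter.Convert(OldDoc{Title: "t", Count: "42"}, &to); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", to) // {Title:t Count:42 Note:migrated from: t}
}
```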

+ 0 - 1
vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go

@@ -17,7 +17,6 @@ const (
 	LayerAnnotationNydusBlob          = "containerd.io/snapshot/nydus-blob"
 	LayerAnnotationNydusBlobDigest    = "containerd.io/snapshot/nydus-blob-digest"
 	LayerAnnotationNydusBlobSize      = "containerd.io/snapshot/nydus-blob-size"
-	LayerAnnotationNydusBlobIDs       = "containerd.io/snapshot/nydus-blob-ids"
 	LayerAnnotationNydusBootstrap     = "containerd.io/snapshot/nydus-bootstrap"
 	LayerAnnotationNydusSourceChainID = "containerd.io/snapshot/nydus-source-chainid"
 

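For orientation, the constants in this file end up as OCI descriptor annotations on converted layers (the removed `LayerAnnotationNydusBlobIDs` is simply no longer emitted). A hedged sketch of how a nydus blob layer descriptor is tagged, with placeholder digest and size:

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Placeholder content; real digests and sizes come from the content store.
	dgst := digest.FromString("example nydus blob")

	desc := ocispec.Descriptor{
		MediaType: "application/vnd.oci.image.layer.nydus.blob.v1",
		Digest:    dgst,
		Size:      1024,
		Annotations: map[string]string{
			// LayerAnnotationNydusBlob marks this as a nydus blob layer.
			"containerd.io/snapshot/nydus-blob": "true",
		},
	}
	fmt.Printf("%+v\n", desc)
}
```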
+ 464 - 166
vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go

@@ -11,9 +11,10 @@ package converter
 
 import (
 	"archive/tar"
+	"bytes"
 	"compress/gzip"
 	"context"
-	"encoding/json"
+	"encoding/binary"
 	"fmt"
 	"io"
 	"os"
@@ -24,10 +25,12 @@ import (
 	"github.com/containerd/containerd/archive"
 	"github.com/containerd/containerd/archive"
 	"github.com/containerd/containerd/archive/compression"
 	"github.com/containerd/containerd/archive/compression"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/images/converter"
 	"github.com/containerd/containerd/images/converter"
 	"github.com/containerd/containerd/labels"
 	"github.com/containerd/containerd/labels"
 	"github.com/containerd/fifo"
 	"github.com/containerd/fifo"
+	"github.com/klauspost/compress/zstd"
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/image-spec/identity"
 	"github.com/opencontainers/image-spec/identity"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@@ -35,11 +38,14 @@ import (
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/sync/errgroup"
 
 
 	"github.com/containerd/nydus-snapshotter/pkg/converter/tool"
 	"github.com/containerd/nydus-snapshotter/pkg/converter/tool"
-	"github.com/containerd/nydus-snapshotter/pkg/errdefs"
+	"github.com/containerd/nydus-snapshotter/pkg/label"
 )
 )
 
 
-const bootstrapNameInTar = "image.boot"
-const blobNameInTar = "image.blob"
+const EntryBlob = "image.blob"
+const EntryBootstrap = "image.boot"
+const EntryBlobMeta = "blob.meta"
+const EntryBlobMetaHeader = "blob.meta.header"
+const EntryTOC = "rafs.blob.toc"
 
 
 const envNydusBuilder = "NYDUS_BUILDER"
 const envNydusBuilder = "NYDUS_BUILDER"
 const envNydusWorkDir = "NYDUS_WORKDIR"
 const envNydusWorkDir = "NYDUS_WORKDIR"
@@ -113,152 +119,190 @@ func unpackOciTar(ctx context.Context, dst string, reader io.Reader) error {
 	return nil
 }
 
-// Unpack a Nydus formatted tar stream into a directory.
-func unpackNydusTar(ctx context.Context, bootDst, blobDst string, ra content.ReaderAt) error {
+// unpackNydusBlob unpacks a Nydus formatted tar stream into a directory.
+// unpackBlob indicates whether to unpack blob data.
+func unpackNydusBlob(bootDst, blobDst string, ra content.ReaderAt, unpackBlob bool) error {
 	boot, err := os.OpenFile(bootDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
 	if err != nil {
 		return errors.Wrapf(err, "write to bootstrap %s", bootDst)
 	}
 	defer boot.Close()
 
-	if err = unpackBootstrapFromNydusTar(ctx, ra, boot); err != nil {
+	if _, err = UnpackEntry(ra, EntryBootstrap, boot); err != nil {
 		return errors.Wrap(err, "unpack bootstrap from nydus")
 		return errors.Wrap(err, "unpack bootstrap from nydus")
 	}
 	}
 
 
-	blob, err := os.OpenFile(blobDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
-	if err != nil {
-		return errors.Wrapf(err, "write to blob %s", blobDst)
-	}
-	defer blob.Close()
+	if unpackBlob {
+		blob, err := os.OpenFile(blobDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+		if err != nil {
+			return errors.Wrapf(err, "write to blob %s", blobDst)
+		}
+		defer blob.Close()
 
-	if err = unpackBlobFromNydusTar(ctx, ra, blob); err != nil {
-		return errors.Wrap(err, "unpack blob from nydus")
+		if _, err = UnpackEntry(ra, EntryBlob, blob); err != nil {
+			if errors.Is(err, ErrNotFound) {
+				// The nydus layer may contain only bootstrap and no blob
+				// data, which should be ignored.
+				return nil
+			}
+			return errors.Wrap(err, "unpack blob from nydus")
+		}
 	}
 
 	return nil
 }
 
-// Unpack the bootstrap from nydus formatted tar stream (blob + bootstrap).
-// The nydus formatted tar stream is a tar-like structure that arranges the
-// data as follows:
-//
-// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header`
-func unpackBootstrapFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error {
-	cur := ra.Size()
-	reader := newSeekReader(ra)
-
+func seekFileByTarHeader(ra content.ReaderAt, targetName string, handle func(io.Reader, *tar.Header) error) error {
 	const headerSize = 512
 
-	// Seek from tail to head of nydus formatted tar stream to find nydus
-	// bootstrap data.
-	for {
-		if headerSize > cur {
-			return fmt.Errorf("invalid tar format at pos %d", cur)
-		}
+	if headerSize > ra.Size() {
+		return fmt.Errorf("invalid nydus tar size %d", ra.Size())
+	}
 
-		// Try to seek to the part of tar header.
-		var err error
-		cur, err = reader.Seek(cur-headerSize, io.SeekCurrent)
+	cur := ra.Size() - headerSize
+	reader := newSeekReader(ra)
+
+	// Seek from tail to head of nydus formatted tar stream to find
+	// target data.
+	for {
+		// Try to seek the part of tar header.
+		_, err := reader.Seek(cur, io.SeekStart)
 		if err != nil {
-			return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize)
+			return errors.Wrapf(err, "seek %d for nydus tar header", cur)
 		}
 
-		tr := tar.NewReader(reader)
 		// Parse tar header.
+		tr := tar.NewReader(reader)
 		hdr, err := tr.Next()
 		if err != nil {
-			return errors.Wrap(err, "parse tar header")
+			return errors.Wrap(err, "parse nydus tar header")
 		}
 
-		if hdr.Name == bootstrapNameInTar {
-			// Try to seek to the part of tar data (bootstrap_data).
-			if hdr.Size > cur {
-				return fmt.Errorf("invalid tar format at pos %d", cur)
-			}
-			bootstrapOffset := cur - hdr.Size
-			_, err = reader.Seek(bootstrapOffset, io.SeekStart)
+		if cur < hdr.Size {
+			return fmt.Errorf("invalid nydus tar data, name %s, size %d", hdr.Name, hdr.Size)
+		}
+
+		if hdr.Name == targetName {
+			// Try to seek the part of tar data.
+			_, err = reader.Seek(cur-hdr.Size, io.SeekStart)
 			if err != nil {
-				return errors.Wrap(err, "seek to bootstrap data offset")
+				return errors.Wrap(err, "seek target data offset")
 			}
+			dataReader := io.NewSectionReader(reader, cur-hdr.Size, hdr.Size)
 
-			// Copy tar data (bootstrap_data) to provided target writer.
-			if _, err := io.CopyN(target, reader, hdr.Size); err != nil {
-				return errors.Wrap(err, "copy bootstrap data to reader")
+			if err := handle(dataReader, hdr); err != nil {
+				return errors.Wrap(err, "handle target data")
 			}
 
 			return nil
 		}
 
-		if cur == hdr.Size {
+		cur = cur - hdr.Size - headerSize
+		if cur < 0 {
 			break
 		}
 	}
 
-	return fmt.Errorf("can't find bootstrap in nydus tar")
+	return errors.Wrapf(ErrNotFound, "can't find target %s by seeking tar", targetName)
 }
 
-// Unpack the blob from nydus formatted tar stream (blob + bootstrap).
-// The nydus formatted tar stream is a tar-like structure that arranges the
-// data as follows:
-//
-// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header`
-func unpackBlobFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error {
-	cur := ra.Size()
-	reader := newSeekReader(ra)
-
-	const headerSize = 512
-
-	// Seek from tail to head of nydus formatted tar stream to find nydus
-	// bootstrap data.
-	for {
-		if headerSize > cur {
-			break
-		}
+func seekFileByTOC(ra content.ReaderAt, targetName string, handle func(io.Reader, *tar.Header) error) (*TOCEntry, error) {
+	entrySize := 128
+	var tocEntry *TOCEntry
 
-		// Try to seek to the part of tar header.
-		var err error
-		cur, err = reader.Seek(cur-headerSize, io.SeekStart)
+	err := seekFileByTarHeader(ra, EntryTOC, func(tocEntryDataReader io.Reader, _ *tar.Header) error {
+		entryData, err := io.ReadAll(tocEntryDataReader)
 		if err != nil {
-			return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize)
+			return errors.Wrap(err, "read toc entries")
 		}
-
-		tr := tar.NewReader(reader)
-		// Parse tar header.
-		hdr, err := tr.Next()
-		if err != nil {
-			return errors.Wrap(err, "parse tar header")
+		if len(entryData)%entrySize != 0 {
+			return fmt.Errorf("invalid entries length %d", len(entryData))
 		}
 
-		if hdr.Name == bootstrapNameInTar {
-			if hdr.Size > cur {
-				return fmt.Errorf("invalid tar format at pos %d", cur)
+		count := len(entryData) / entrySize
+		for i := 0; i < count; i++ {
+			var entry TOCEntry
+			r := bytes.NewReader(entryData[i*entrySize : i*entrySize+entrySize])
+			if err := binary.Read(r, binary.LittleEndian, &entry); err != nil {
+				return errors.Wrap(err, "read toc entries")
 			}
-			cur, err = reader.Seek(cur-hdr.Size, io.SeekStart)
-			if err != nil {
-				return errors.Wrap(err, "seek to bootstrap data offset")
-			}
-		} else if hdr.Name == blobNameInTar {
-			if hdr.Size > cur {
-				return fmt.Errorf("invalid tar format at pos %d", cur)
-			}
-			_, err = reader.Seek(cur-hdr.Size, io.SeekStart)
-			if err != nil {
-				return errors.Wrap(err, "seek to blob data offset")
-			}
-			if _, err := io.CopyN(target, reader, hdr.Size); err != nil {
-				return errors.Wrap(err, "copy blob data to reader")
+			if entry.GetName() == targetName {
+				compressor, err := entry.GetCompressor()
+				if err != nil {
+					return errors.Wrap(err, "get compressor of entry")
+				}
+				compressedOffset := int64(entry.GetCompressedOffset())
+				compressedSize := int64(entry.GetCompressedSize())
+				sr := io.NewSectionReader(ra, compressedOffset, compressedSize)
+
+				var rd io.Reader
+				switch compressor {
+				case CompressorZstd:
+					decoder, err := zstd.NewReader(sr)
+					if err != nil {
+						return errors.Wrap(err, "seek to target data offset")
+					}
+					defer decoder.Close()
+					rd = decoder
+				case CompressorNone:
+					rd = sr
+				default:
+					return fmt.Errorf("unsupported compressor %x", compressor)
+				}
+
+				if err := handle(rd, nil); err != nil {
+					return errors.Wrap(err, "handle target entry data")
+				}
+
+				tocEntry = &entry
+
+				return nil
 			}
-			return nil
 		}
+
+		return errors.Wrapf(ErrNotFound, "can't find target %s by seeking TOC", targetName)
+	})
+
+	return tocEntry, err
+}
+
+// Unpack the file from nydus formatted tar stream.
+// The nydus formatted tar stream is a tar-like structure that arranges the
+// data as follows:
+//
+// `data | tar_header | ... | data | tar_header | [toc_entry | ... | toc_entry | tar_header]`
+func UnpackEntry(ra content.ReaderAt, targetName string, target io.Writer) (*TOCEntry, error) {
+	handle := func(dataReader io.Reader, _ *tar.Header) error {
+		// Copy data to provided target writer.
+		if _, err := io.Copy(target, dataReader); err != nil {
+			return errors.Wrap(err, "copy target data to reader")
+		}
+
+		return nil
 	}
 
-	return nil
+	return seekFile(ra, targetName, handle)
+}
+
+func seekFile(ra content.ReaderAt, targetName string, handle func(io.Reader, *tar.Header) error) (*TOCEntry, error) {
+	// Try seek target data by TOC.
+	entry, err := seekFileByTOC(ra, targetName, handle)
+	if err != nil {
+		if !errors.Is(err, ErrNotFound) {
+			return nil, errors.Wrap(err, "seek file by TOC")
+		}
+	} else {
+		return entry, nil
+	}
+
+	// Seek target data by tar header, ensure compatible with old rafs blob format.
+	return nil, seekFileByTarHeader(ra, targetName, handle)
 }
 
 // Pack converts an OCI tar stream to nydus formatted stream with a tar-like
 // structure that arranges the data as follows:
 //
-// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header`
+// `data | tar_header | data | tar_header | [toc_entry | ... | toc_entry | tar_header]`
 //
 // The caller should write OCI tar stream into the returned `io.WriteCloser`,
 // then the Pack method will write the nydus formatted stream to `dest`
@@ -267,6 +311,24 @@ func unpackBlobFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.
 // Important: the caller must check `io.WriteCloser.Close() == nil` to ensure
 // the conversion workflow is finished.
 func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) {
+	if opt.FsVersion == "" {
+		opt.FsVersion = "6"
+	}
+
+	builderPath := getBuilder(opt.BuilderPath)
+	opt.features = tool.DetectFeatures(builderPath, []tool.Feature{tool.FeatureTar2Rafs})
+
+	if opt.OCIRef {
+		if opt.FsVersion == "6" {
+			return packFromTar(ctx, dest, opt)
+		}
+		return nil, fmt.Errorf("oci ref can only be supported by fs version 6")
+	}
+
+	if opt.features.Contains(tool.FeatureTar2Rafs) {
+		return packFromTar(ctx, dest, opt)
+	}
+
 	workDir, err := ensureWorkDir(opt.WorkDir)
 	if err != nil {
 		return nil, errors.Wrap(err, "ensure work directory")
@@ -295,9 +357,7 @@ func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser,
 	}()
 
 	wc := newWriteCloser(pw, func() error {
-		defer func() {
-			os.RemoveAll(workDir)
-		}()
+		defer os.RemoveAll(workDir)
 
 		// Because PipeWriter#Close is called does not mean that the PipeReader
 		// has finished reading all the data, and unpack may not be complete yet,
@@ -313,15 +373,19 @@ func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser,
 
 		go func() {
 			err := tool.Pack(tool.PackOption{
-				BuilderPath: getBuilder(opt.BuilderPath),
+				BuilderPath: builderPath,
 
 				BlobPath:         blobPath,
 				FsVersion:        opt.FsVersion,
 				SourcePath:       sourceDir,
 				ChunkDictPath:    opt.ChunkDictPath,
 				PrefetchPatterns: opt.PrefetchPatterns,
+				AlignedChunk:     opt.AlignedChunk,
+				ChunkSize:        opt.ChunkSize,
 				Compressor:       opt.Compressor,
 				Timeout:          opt.Timeout,
+
+				Features: opt.features,
 			})
 			if err != nil {
 				pw.CloseWithError(errors.Wrapf(err, "convert blob for %s", sourceDir))
@@ -341,6 +405,117 @@ func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser,
 	return wc, nil
 }
 
+func packFromTar(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) {
+	workDir, err := ensureWorkDir(opt.WorkDir)
+	if err != nil {
+		return nil, errors.Wrap(err, "ensure work directory")
+	}
+	defer func() {
+		if err != nil {
+			os.RemoveAll(workDir)
+		}
+	}()
+
+	rafsBlobPath := filepath.Join(workDir, "blob.rafs")
+	rafsBlobFifo, err := fifo.OpenFifo(ctx, rafsBlobPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644)
+	if err != nil {
+		return nil, errors.Wrapf(err, "create fifo file")
+	}
+
+	tarBlobPath := filepath.Join(workDir, "blob.targz")
+	tarBlobFifo, err := fifo.OpenFifo(ctx, tarBlobPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_NONBLOCK, 0644)
+	if err != nil {
+		defer rafsBlobFifo.Close()
+		return nil, errors.Wrapf(err, "create fifo file")
+	}
+
+	pr, pw := io.Pipe()
+	eg := errgroup.Group{}
+
+	wc := newWriteCloser(pw, func() error {
+		defer os.RemoveAll(workDir)
+		if err := eg.Wait(); err != nil {
+			return errors.Wrapf(err, "convert nydus ref")
+		}
+		return nil
+	})
+
+	eg.Go(func() error {
+		defer tarBlobFifo.Close()
+		buffer := bufPool.Get().(*[]byte)
+		defer bufPool.Put(buffer)
+		if _, err := io.CopyBuffer(tarBlobFifo, pr, *buffer); err != nil {
+			return errors.Wrapf(err, "copy targz to fifo")
+		}
+		return nil
+	})
+
+	eg.Go(func() error {
+		defer rafsBlobFifo.Close()
+		buffer := bufPool.Get().(*[]byte)
+		defer bufPool.Put(buffer)
+		if _, err := io.CopyBuffer(dest, rafsBlobFifo, *buffer); err != nil {
+			return errors.Wrapf(err, "copy blob meta fifo to nydus blob")
+		}
+		return nil
+	})
+
+	eg.Go(func() error {
+		var err error
+		if opt.OCIRef {
+			err = tool.Pack(tool.PackOption{
+				BuilderPath: getBuilder(opt.BuilderPath),
+
+				OCIRef:     opt.OCIRef,
+				BlobPath:   rafsBlobPath,
+				SourcePath: tarBlobPath,
+				Timeout:    opt.Timeout,
+
+				Features: opt.features,
+			})
+		} else {
+			err = tool.Pack(tool.PackOption{
+				BuilderPath: getBuilder(opt.BuilderPath),
+
+				BlobPath:         rafsBlobPath,
+				FsVersion:        opt.FsVersion,
+				SourcePath:       tarBlobPath,
+				ChunkDictPath:    opt.ChunkDictPath,
+				PrefetchPatterns: opt.PrefetchPatterns,
+				AlignedChunk:     opt.AlignedChunk,
+				ChunkSize:        opt.ChunkSize,
+				BatchSize:        opt.BatchSize,
+				Compressor:       opt.Compressor,
+				Timeout:          opt.Timeout,
+
+				Features: opt.features,
+			})
+		}
+		if err != nil {
+			// Ignore the error returned by wc.Close() here; we only care
+			// about the command exit status reported by `tool.Pack`.
+			wc.Close()
+		}
+		return errors.Wrapf(err, "call builder")
+	})
+
+	return wc, nil
+}
+
+func calcBlobTOCDigest(ra content.ReaderAt) (*digest.Digest, error) {
+	digester := digest.Canonical.Digester()
+	if err := seekFileByTarHeader(ra, EntryTOC, func(tocData io.Reader, _ *tar.Header) error {
+		if _, err := io.Copy(digester.Hash(), tocData); err != nil {
+			return errors.Wrap(err, "calc toc data and header digest")
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	tocDigest := digester.Digest()
+	return &tocDigest, nil
+}
+
 // Merge multiple nydus bootstraps (from each layer of image) to a final
 // bootstrap. And due to the possibility of enabling the `ChunkDictPath`
 // option causes the data deduplication, it will return the actual blob
@@ -352,22 +527,40 @@ func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption)
 	}
 	defer os.RemoveAll(workDir)
 
-	eg, ctx := errgroup.WithContext(ctx)
+	getBootstrapPath := func(layerIdx int) string {
+		digestHex := layers[layerIdx].Digest.Hex()
+		if originalDigest := layers[layerIdx].OriginalDigest; originalDigest != nil {
+			return filepath.Join(workDir, originalDigest.Hex())
+		}
+		return filepath.Join(workDir, digestHex)
+	}
+
+	eg, _ := errgroup.WithContext(ctx)
 	sourceBootstrapPaths := []string{}
+	rafsBlobDigests := []string{}
+	rafsBlobSizes := []int64{}
+	rafsBlobTOCDigests := []string{}
 	for idx := range layers {
-		sourceBootstrapPaths = append(sourceBootstrapPaths, filepath.Join(workDir, layers[idx].Digest.Hex()))
+		sourceBootstrapPaths = append(sourceBootstrapPaths, getBootstrapPath(idx))
+		if layers[idx].OriginalDigest != nil {
+			rafsBlobDigests = append(rafsBlobDigests, layers[idx].Digest.Hex())
+			rafsBlobSizes = append(rafsBlobSizes, layers[idx].ReaderAt.Size())
+			rafsBlobTOCDigest, err := calcBlobTOCDigest(layers[idx].ReaderAt)
+			if err != nil {
+				return nil, errors.Wrapf(err, "calc blob toc digest for layer %s", layers[idx].Digest)
+			}
+			rafsBlobTOCDigests = append(rafsBlobTOCDigests, rafsBlobTOCDigest.Hex())
+		}
 		eg.Go(func(idx int) func() error {
 			return func() error {
-				layer := layers[idx]
-
 				// Use the hex hash string of whole tar blob as the bootstrap name.
-				bootstrap, err := os.Create(filepath.Join(workDir, layer.Digest.Hex()))
+				bootstrap, err := os.Create(getBootstrapPath(idx))
 				if err != nil {
 					return errors.Wrap(err, "create source bootstrap")
 				}
 				defer bootstrap.Close()
 
-				if err := unpackBootstrapFromNydusTar(ctx, layer.ReaderAt, bootstrap); err != nil {
+				if _, err := UnpackEntry(layers[idx].ReaderAt, EntryBootstrap, bootstrap); err != nil {
 					return errors.Wrap(err, "unpack nydus tar")
 				}
 
@@ -386,11 +579,16 @@ func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption)
 		BuilderPath: getBuilder(opt.BuilderPath),
 
 		SourceBootstrapPaths: sourceBootstrapPaths,
-		TargetBootstrapPath:  targetBootstrapPath,
-		ChunkDictPath:        opt.ChunkDictPath,
-		PrefetchPatterns:     opt.PrefetchPatterns,
-		OutputJSONPath:       filepath.Join(workDir, "merge-output.json"),
-		Timeout:              opt.Timeout,
+		RafsBlobDigests:      rafsBlobDigests,
+		RafsBlobSizes:        rafsBlobSizes,
+		RafsBlobTOCDigests:   rafsBlobTOCDigests,
+
+		TargetBootstrapPath: targetBootstrapPath,
+		ChunkDictPath:       opt.ChunkDictPath,
+		ParentBootstrapPath: opt.ParentBootstrapPath,
+		PrefetchPatterns:    opt.PrefetchPatterns,
+		OutputJSONPath:      filepath.Join(workDir, "merge-output.json"),
+		Timeout:             opt.Timeout,
 	})
 	if err != nil {
 		return nil, errors.Wrap(err, "merge bootstrap")
@@ -399,7 +597,7 @@ func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption)
 	var rc io.ReadCloser
 
 	if opt.WithTar {
-		rc, err = packToTar(targetBootstrapPath, fmt.Sprintf("image/%s", bootstrapNameInTar), false)
+		rc, err = packToTar(targetBootstrapPath, fmt.Sprintf("image/%s", EntryBootstrap), false)
 		if err != nil {
 			return nil, errors.Wrap(err, "pack bootstrap to tar")
 		}
@@ -428,8 +626,8 @@ func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt Unpack
 	}
 	defer os.RemoveAll(workDir)
 
-	bootPath, blobPath := filepath.Join(workDir, bootstrapNameInTar), filepath.Join(workDir, blobNameInTar)
-	if err = unpackNydusTar(ctx, bootPath, blobPath, ra); err != nil {
+	bootPath, blobPath := filepath.Join(workDir, EntryBootstrap), filepath.Join(workDir, EntryBlob)
+	if err = unpackNydusBlob(bootPath, blobPath, ra, !opt.Stream); err != nil {
 		return errors.Wrap(err, "unpack nydus tar")
 		return errors.Wrap(err, "unpack nydus tar")
 	}
 	}
 
 
@@ -440,16 +638,35 @@ func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt Unpack
 	}
 	defer blobFifo.Close()
 
+	unpackOpt := tool.UnpackOption{
+		BuilderPath:   getBuilder(opt.BuilderPath),
+		BootstrapPath: bootPath,
+		BlobPath:      blobPath,
+		TarPath:       tarPath,
+		Timeout:       opt.Timeout,
+	}
+
+	if opt.Stream {
+		proxy, err := setupContentStoreProxy(opt.WorkDir, ra)
+		if err != nil {
+			return errors.Wrap(err, "new content store proxy")
+		}
+		defer proxy.close()
+
+		// generate backend config file
+		backendConfigStr := fmt.Sprintf(`{"version":2,"backend":{"type":"http-proxy","http-proxy":{"addr":"%s"}}}`, proxy.socketPath)
+		backendConfigPath := filepath.Join(workDir, "backend-config.json")
+		if err := os.WriteFile(backendConfigPath, []byte(backendConfigStr), 0644); err != nil {
+			return errors.Wrap(err, "write backend config")
+		}
+		unpackOpt.BlobPath = ""
+		unpackOpt.BackendConfigPath = backendConfigPath
+	}
+
 	unpackErrChan := make(chan error)
 	go func() {
 		defer close(unpackErrChan)
-		err := tool.Unpack(tool.UnpackOption{
-			BuilderPath:   getBuilder(opt.BuilderPath),
-			BootstrapPath: bootPath,
-			BlobPath:      blobPath,
-			TarPath:       tarPath,
-			Timeout:       opt.Timeout,
-		})
+		err := tool.Unpack(unpackOpt)
 		if err != nil {
 			blobFifo.Close()
 			unpackErrChan <- err
@@ -476,11 +693,11 @@ func IsNydusBlobAndExists(ctx context.Context, cs content.Store, desc ocispec.De
 		return false
 	}
 
-	return IsNydusBlob(ctx, desc)
+	return IsNydusBlob(desc)
 }
 
-// IsNydusBlob returns true when the specified descriptor is nydus blob format.
-func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool {
+// IsNydusBlob returns true when the specified descriptor is nydus blob layer.
+func IsNydusBlob(desc ocispec.Descriptor) bool {
 	if desc.Annotations == nil {
 		return false
 	}
@@ -489,6 +706,16 @@ func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool {
 	return hasAnno
 }
 
+// IsNydusBootstrap returns true when the specified descriptor is nydus bootstrap layer.
+func IsNydusBootstrap(desc ocispec.Descriptor) bool {
+	if desc.Annotations == nil {
+		return false
+	}
+
+	_, hasAnno := desc.Annotations[LayerAnnotationNydusBootstrap]
+	return hasAnno
+}
+
 // LayerConvertFunc returns a function which converts an OCI image layer to
 // a nydus blob layer, and set the media type to "application/vnd.oci.image.layer.nydus.blob.v1".
 func LayerConvertFunc(opt PackOption) converter.ConvertFunc {
@@ -497,6 +724,11 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc {
 			return nil, nil
 		}
 
+		// Skip the conversion of nydus layer.
+		if IsNydusBlob(desc) || IsNydusBootstrap(desc) {
+			return nil, nil
+		}
+
 		ra, err := cs.ReaderAt(ctx, desc)
 		if err != nil {
 			return nil, errors.Wrap(err, "get source blob reader")
@@ -511,9 +743,14 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc {
 		}
 		defer dst.Close()
 
-		tr, err := compression.DecompressStream(rdr)
-		if err != nil {
-			return nil, errors.Wrap(err, "decompress blob stream")
+		var tr io.ReadCloser
+		if opt.OCIRef {
+			tr = io.NopCloser(rdr)
+		} else {
+			tr, err = compression.DecompressStream(rdr)
+			if err != nil {
+				return nil, errors.Wrap(err, "decompress blob stream")
+			}
 		}
 
 		digester := digest.SHA256.Digester()
@@ -574,14 +811,12 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc {
 			},
 		}
 
-		if opt.Backend != nil {
-			blobRa, err := cs.ReaderAt(ctx, newDesc)
-			if err != nil {
-				return nil, errors.Wrap(err, "get nydus blob reader")
-			}
-			defer blobRa.Close()
+		if opt.OCIRef {
+			newDesc.Annotations[label.NydusRefLayer] = desc.Digest.String()
+		}
 
-			if err := opt.Backend.Push(ctx, blobRa, blobDigest); err != nil {
+		if opt.Backend != nil {
+			if err := opt.Backend.Push(ctx, cs, newDesc); err != nil {
 				return nil, errors.Wrap(err, "push to storage backend")
 			}
 		}
@@ -595,11 +830,15 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc {
 // the index conversion and the manifest conversion.
 func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc {
 	return func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) {
+		// If the previous conversion did not occur, the `newDesc` may be nil.
+		if newDesc == nil {
+			return &orgDesc, nil
+		}
 		switch {
 		case images.IsIndexType(newDesc.MediaType):
 			return convertIndex(ctx, cs, orgDesc, newDesc)
 		case images.IsManifestType(newDesc.MediaType):
-			return convertManifest(ctx, cs, newDesc, opt)
+			return convertManifest(ctx, cs, orgDesc, newDesc, opt)
 		default:
 			return newDesc, nil
 		}
@@ -636,6 +875,13 @@ func convertIndex(ctx context.Context, cs content.Store, orgDesc ocispec.Descrip
 		manifest.Platform.OSFeatures = append(manifest.Platform.OSFeatures, ManifestOSFeatureNydus)
 		index.Manifests[i] = manifest
 	}
+
+	// If the converted manifest list contains only one manifest,
+	// convert it directly to manifest.
+	if len(index.Manifests) == 1 {
+		return &index.Manifests[0], nil
+	}
+
 	// Update image index in content store.
 	newIndexDesc, err := writeJSON(ctx, cs, index, *newDesc, indexLabels)
 	if err != nil {
@@ -644,10 +890,23 @@ func convertIndex(ctx context.Context, cs content.Store, orgDesc ocispec.Descrip
 	return newIndexDesc, nil
 }
 
+// isNydusImage checks if the last layer is nydus bootstrap,
+// so that we can ensure it is a nydus image.
+func isNydusImage(manifest *ocispec.Manifest) bool {
+	layers := manifest.Layers
+	if len(layers) != 0 {
+		desc := layers[len(layers)-1]
+		if IsNydusBootstrap(desc) {
+			return true
+		}
+	}
+	return false
+}
+
 // convertManifest merges all the nydus blob layers into a
 // nydus bootstrap layer, update the image config,
 // and modify the image manifest.
-func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) {
+func convertManifest(ctx context.Context, cs content.Store, oldDesc ocispec.Descriptor, newDesc *ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) {
 	var manifest ocispec.Manifest
 	manifestDesc := *newDesc
 	manifestLabels, err := readJSON(ctx, cs, &manifest, manifestDesc)
@@ -655,14 +914,21 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des
 		return nil, errors.Wrap(err, "read manifest json")
 		return nil, errors.Wrap(err, "read manifest json")
 	}
 	}
 
 
+	if isNydusImage(&manifest) {
+		return &manifestDesc, nil
+	}
+
+	// This option needs to be enabled for image scenario.
+	opt.WithTar = true
+
+	// If the original image is already an OCI type, we should forcibly set the
+	// bootstrap layer to the OCI type.
+	if !opt.OCI && oldDesc.MediaType == ocispec.MediaTypeImageManifest {
+		opt.OCI = true
+	}
+
 	// Append bootstrap layer to manifest.
-	bootstrapDesc, blobDescs, err := MergeLayers(ctx, cs, manifest.Layers, MergeOption{
-		BuilderPath:   opt.BuilderPath,
-		WorkDir:       opt.WorkDir,
-		ChunkDictPath: opt.ChunkDictPath,
-		FsVersion:     opt.FsVersion,
-		WithTar:       true,
-	})
+	bootstrapDesc, blobDescs, err := MergeLayers(ctx, cs, manifest.Layers, opt)
 	if err != nil {
 		return nil, errors.Wrap(err, "merge nydus layers")
 	}
@@ -678,7 +944,8 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des
 		// Affected by chunk dict, the blob list referenced by final bootstrap
 		// are from different layers, part of them are from original layers, part
 		// from chunk dict bootstrap, so we need to rewrite manifest's layers here.
-		manifest.Layers = append(blobDescs, *bootstrapDesc)
+		blobDescs := append(blobDescs, *bootstrapDesc)
+		manifest.Layers = blobDescs
 	}
 
 	// Update the gc label of bootstrap layer
@@ -691,8 +958,13 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des
 	if err != nil {
 		return nil, errors.Wrap(err, "read image config")
 	}
+	bootstrapHistory := ocispec.History{
+		CreatedBy: "Nydus Converter",
+		Comment:   "Nydus Bootstrap Layer",
+	}
 	if opt.Backend != nil {
 		config.RootFS.DiffIDs = []digest.Digest{digest.Digest(bootstrapDesc.Annotations[LayerAnnotationUncompressed])}
+		config.History = []ocispec.History{bootstrapHistory}
 	} else {
 		config.RootFS.DiffIDs = make([]digest.Digest, 0, len(manifest.Layers))
 		for i, layer := range manifest.Layers {
@@ -700,6 +972,9 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des
 			// Remove useless annotation.
 			delete(manifest.Layers[i].Annotations, LayerAnnotationUncompressed)
 		}
+		// Append history item for bootstrap layer, to ensure the history consistency.
+		// See https://github.com/distribution/distribution/blob/e5d5810851d1f17a5070e9b6f940d8af98ea3c29/manifest/schema1/config_builder.go#L136
+		config.History = append(config.History, bootstrapHistory)
 	}
 	// Update image config in content store.
 	newConfigDesc, err := writeJSON(ctx, cs, config, manifest.Config, configLabels)
@@ -710,6 +985,11 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des
 	// Update the config gc label
 	manifestLabels[configGCLabelKey] = newConfigDesc.Digest.String()
 
+	// Associate a reference to the original OCI manifest.
+	// See the `subject` field description in
+	// https://github.com/opencontainers/image-spec/blob/main/manifest.md#image-manifest-property-descriptions
+	manifest.Subject = &oldDesc
+
 	// Update image manifest in content store.
 	newManifestDesc, err := writeJSON(ctx, cs, manifest, manifestDesc, manifestLabels)
 	if err != nil {
@@ -726,33 +1006,45 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript
 	layers := []Layer{}
 
 	var chainID digest.Digest
-	for _, blobDesc := range descs {
-		ra, err := cs.ReaderAt(ctx, blobDesc)
+	nydusBlobDigests := []digest.Digest{}
+	for _, nydusBlobDesc := range descs {
+		ra, err := cs.ReaderAt(ctx, nydusBlobDesc)
 		if err != nil {
-			return nil, nil, errors.Wrapf(err, "get reader for blob %q", blobDesc.Digest)
+			return nil, nil, errors.Wrapf(err, "get reader for blob %q", nydusBlobDesc.Digest)
 		}
 		defer ra.Close()
+		var originalDigest *digest.Digest
+		if opt.OCIRef {
+			digestStr := nydusBlobDesc.Annotations[label.NydusRefLayer]
+			_originalDigest, err := digest.Parse(digestStr)
+			if err != nil {
+				return nil, nil, errors.Wrapf(err, "invalid label %s=%s", label.NydusRefLayer, digestStr)
+			}
+			originalDigest = &_originalDigest
+		}
 		layers = append(layers, Layer{
-			Digest:   blobDesc.Digest,
-			ReaderAt: ra,
+			Digest:         nydusBlobDesc.Digest,
+			OriginalDigest: originalDigest,
+			ReaderAt:       ra,
 		})
 		if chainID == "" {
-			chainID = identity.ChainID([]digest.Digest{blobDesc.Digest})
+			chainID = identity.ChainID([]digest.Digest{nydusBlobDesc.Digest})
 		} else {
-			chainID = identity.ChainID([]digest.Digest{chainID, blobDesc.Digest})
+			chainID = identity.ChainID([]digest.Digest{chainID, nydusBlobDesc.Digest})
 		}
+		nydusBlobDigests = append(nydusBlobDigests, nydusBlobDesc.Digest)
 	}
 
 	// Merge all nydus bootstraps into a final nydus bootstrap.
 	pr, pw := io.Pipe()
-	blobDigestChan := make(chan []digest.Digest, 1)
+	originalBlobDigestChan := make(chan []digest.Digest, 1)
 	go func() {
 		defer pw.Close()
-		blobDigests, err := Merge(ctx, layers, pw, opt)
+		originalBlobDigests, err := Merge(ctx, layers, pw, opt)
 		if err != nil {
 			pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap"))
 		}
-		blobDigestChan <- blobDigests
+		originalBlobDigestChan <- originalBlobDigests
 	}()
 
 	// Compress final nydus bootstrap to tar.gz and write into content store.
@@ -791,10 +1083,17 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript
 		return nil, nil, errors.Wrap(err, "get info from content store")
 	}
 
-	blobDigests := <-blobDigestChan
+	originalBlobDigests := <-originalBlobDigestChan
 	blobDescs := []ocispec.Descriptor{}
-	blobIDs := []string{}
-	for _, blobDigest := range blobDigests {
+
+	var blobDigests []digest.Digest
+	if opt.OCIRef {
+		blobDigests = nydusBlobDigests
+	} else {
+		blobDigests = originalBlobDigests
+	}
+
+	for idx, blobDigest := range blobDigests {
 		blobInfo, err := cs.Info(ctx, blobDigest)
 		if err != nil {
 			return nil, nil, errors.Wrap(err, "get info from content store")
@@ -808,30 +1107,29 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript
 				LayerAnnotationNydusBlob:    "true",
 			},
 		}
+		if opt.OCIRef {
+			blobDesc.Annotations[label.NydusRefLayer] = layers[idx].OriginalDigest.String()
+		}
 		blobDescs = append(blobDescs, blobDesc)
-		blobIDs = append(blobIDs, blobDigest.Hex())
-	}
-
-	blobIDsBytes, err := json.Marshal(blobIDs)
-	if err != nil {
-		return nil, nil, errors.Wrap(err, "marshal blob ids")
 	}
 	}
 
 	if opt.FsVersion == "" {
+		opt.FsVersion = "6"
+	}
+	mediaType := images.MediaTypeDockerSchema2LayerGzip
+	if opt.OCI {
+		mediaType = ocispec.MediaTypeImageLayerGzip
 	}
 	}
 
 	bootstrapDesc := ocispec.Descriptor{
 		Digest:    compressedDgst,
 		Size:      bootstrapInfo.Size,
+		MediaType: mediaType,
 		Annotations: map[string]string{
 		Annotations: map[string]string{
 			LayerAnnotationUncompressed: uncompressedDgst.Digest().String(),
 			LayerAnnotationFSVersion:    opt.FsVersion,
 			// Use this annotation to identify nydus bootstrap layer.
 			LayerAnnotationNydusBootstrap: "true",
-			LayerAnnotationNydusBlobIDs: string(blobIDsBytes),
 		},
 		},
 	}
 
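Several helpers in this file locate entries by walking the nydus blob from its tail. As a standalone, hedged sketch of that trailing-tar-header trick (the `layer.nydus` filename is hypothetical; the vendored code does the same against a `content.ReaderAt`, looping backwards entry by entry):

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

const headerSize = 512

// lastEntry reads the final 512-byte tar header, which describes the
// data block laid out immediately before it.
func lastEntry(f *os.File) (*tar.Header, int64, error) {
	st, err := f.Stat()
	if err != nil {
		return nil, 0, err
	}
	if st.Size() < headerSize {
		return nil, 0, fmt.Errorf("too small to hold a tar header")
	}
	sr := io.NewSectionReader(f, st.Size()-headerSize, headerSize)
	hdr, err := tar.NewReader(sr).Next()
	if err != nil {
		return nil, 0, err
	}
	return hdr, st.Size() - headerSize - hdr.Size, nil
}

func main() {
	f, err := os.Open("layer.nydus") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	hdr, off, err := lastEntry(f)
	if err != nil {
		panic(err)
	}
	fmt.Printf("entry %q: %d bytes at offset %d\n", hdr.Name, hdr.Size, off)
}
```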
+ 168 - 0
vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go

@@ -0,0 +1,168 @@
+//go:build !windows
+// +build !windows
+
+/*
+ * Copyright (c) 2023. Nydus Developers. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package converter
+
+import (
+	"archive/tar"
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/containerd/containerd/content"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type contentStoreProxy struct {
+	socketPath string
+	server     *http.Server
+}
+
+func setupContentStoreProxy(workDir string, ra content.ReaderAt) (*contentStoreProxy, error) {
+	sockP, err := os.CreateTemp(workDir, "nydus-cs-proxy-*.sock")
+	if err != nil {
+		return nil, errors.Wrap(err, "create unix socket file")
+	}
+	if err := os.Remove(sockP.Name()); err != nil {
+		return nil, err
+	}
+	listener, err := net.Listen("unix", sockP.Name())
+	if err != nil {
+		return nil, errors.Wrap(err, "listen unix socket when setup content store proxy")
+	}
+
+	server := &http.Server{
+		Handler: contentProxyHandler(ra),
+	}
+
+	go func() {
+		if err := server.Serve(listener); err != nil && err != http.ErrServerClosed {
+			logrus.WithError(err).Warn("serve content store proxy")
+		}
+	}()
+
+	return &contentStoreProxy{
+		socketPath: sockP.Name(),
+		server:     server,
+	}, nil
+}
+
+func (p *contentStoreProxy) close() error {
+	defer os.Remove(p.socketPath)
+	if err := p.server.Shutdown(context.Background()); err != nil {
+		return errors.Wrap(err, "shutdown content store proxy")
+	}
+	return nil
+}
+
+func parseRangeHeader(rangeStr string, totalLen int64) (start, wantedLen int64, err error) {
+	rangeList := strings.Split(rangeStr, "-")
+	start, err = strconv.ParseInt(rangeList[0], 10, 64)
+	if err != nil {
+		err = errors.Wrap(err, "parse range header")
+		return
+	}
+	if len(rangeList) == 2 {
+		var end int64
+		end, err = strconv.ParseInt(rangeList[1], 10, 64)
+		if err != nil {
+			err = errors.Wrap(err, "parse range header")
+			return
+		}
+		wantedLen = end - start + 1
+	} else {
+		wantedLen = totalLen - start
+	}
+	if start < 0 || start >= totalLen || wantedLen <= 0 {
+		err = fmt.Errorf("invalid range header: %s", rangeStr)
+		return
+	}
+	return
+}
+
+func contentProxyHandler(ra content.ReaderAt) http.Handler {
+	var (
+		dataReader io.Reader
+		curPos     int64
+
+		tarHeader *tar.Header
+		totalLen  int64
+	)
+	resetReader := func() {
+		// TODO: Handle error?
+		_, _ = seekFile(ra, EntryBlob, func(reader io.Reader, hdr *tar.Header) error {
+			dataReader, tarHeader = reader, hdr
+			return nil
+		})
+		curPos = 0
+	}
+
+	resetReader()
+	if tarHeader != nil {
+		totalLen = tarHeader.Size
+	} else {
+		totalLen = ra.Size()
+	}
+	handler := func(w http.ResponseWriter, r *http.Request) {
+		switch r.Method {
+		case http.MethodHead:
+			{
+				w.Header().Set("Content-Length", strconv.FormatInt(totalLen, 10))
+				w.Header().Set("Content-Type", "application/octet-stream")
+				return
+			}
+		case http.MethodGet:
+			{
+				start, wantedLen, err := parseRangeHeader(strings.TrimPrefix(r.Header.Get("Range"), "bytes="), totalLen)
+				if err != nil {
+					w.WriteHeader(http.StatusBadRequest)
+					// TODO: Handle error?
+					_, _ = w.Write([]byte(err.Error()))
+					return
+				}
+
+				// we need to make sure that the dataReader is at the right position
+				if start < curPos {
+					resetReader()
+				}
+				if start > curPos {
+					_, err = io.CopyN(io.Discard, dataReader, start-curPos)
+					if err != nil {
+						w.WriteHeader(http.StatusInternalServerError)
+						// TODO: Handle error?
+						_, _ = w.Write([]byte(err.Error()))
+						return
+					}
+					curPos = start
+				}
+				// then, the curPos must be equal to start
+
+				readLen, err := io.CopyN(w, dataReader, wantedLen)
+				if err != nil && !errors.Is(err, io.EOF) {
+					w.WriteHeader(http.StatusInternalServerError)
+					// TODO: Handle error?
+					_, _ = w.Write([]byte(err.Error()))
+					return
+				}
+				curPos += readLen
+				w.Header().Set("Content-Length", strconv.FormatInt(readLen, 10))
+				w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, start+readLen-1, totalLen))
+				w.Header().Set("Content-Type", "application/octet-stream")
+				return
+			}
+		}
+	}
+	return http.HandlerFunc(handler)
+}
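
To make the proxy's contract concrete, a hedged client-side sketch of what the builder's `http-proxy` backend is expected to do: dial the unix socket returned by `setupContentStoreProxy` and issue ranged GETs (the `content-store` host and the socket path below are dummy placeholders):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
)

// readRange fetches bytes [start, end] from the content-store proxy
// listening on the given unix socket.
func readRange(socketPath string, start, end int64) ([]byte, error) {
	client := &http.Client{
		Transport: &http.Transport{
			// Route every request to the unix socket, ignoring the URL host.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "unix", socketPath)
			},
		},
	}
	req, err := http.NewRequest(http.MethodGet, "http://content-store/", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))

	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func main() {
	data, err := readRange("/tmp/nydus-cs-proxy-123.sock", 0, 511) // hypothetical socket path
	if err != nil {
		panic(err)
	}
	fmt.Println(len(data))
}
```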

+ 140 - 25
vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go

@@ -10,12 +10,11 @@ import (
 	"context"
 	"context"
 	"encoding/json"
 	"encoding/json"
 	"fmt"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"os/exec"
 	"os/exec"
 	"strings"
 	"strings"
 	"time"
 	"time"
 
 
-	"github.com/containerd/nydus-snapshotter/pkg/errdefs"
 	"github.com/opencontainers/go-digest"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/sirupsen/logrus"
@@ -23,6 +22,10 @@ import (
 
 var logger = logrus.WithField("module", "builder")
 
+func isSignalKilled(err error) bool {
+	return strings.Contains(err.Error(), "signal: killed")
+}
+
 type PackOption struct {
 	BuilderPath string
 
@@ -33,35 +36,47 @@ type PackOption struct {
 	ChunkDictPath    string
 	PrefetchPatterns string
 	Compressor       string
+	OCIRef           bool
+	AlignedChunk     bool
+	ChunkSize        string
+	BatchSize        string
 	Timeout          *time.Duration
+
+	Features Features
 }
 
 type MergeOption struct {
 	BuilderPath string
 
 	SourceBootstrapPaths []string
-	TargetBootstrapPath  string
-	ChunkDictPath        string
-	PrefetchPatterns     string
-	OutputJSONPath       string
-	Timeout              *time.Duration
+	RafsBlobDigests      []string
+	RafsBlobTOCDigests   []string
+	RafsBlobSizes        []int64
+
+	TargetBootstrapPath string
+	ChunkDictPath       string
+	ParentBootstrapPath string
+	PrefetchPatterns    string
+	OutputJSONPath      string
+	Timeout             *time.Duration
 }
 
 type UnpackOption struct {
-	BuilderPath   string
-	BootstrapPath string
-	BlobPath      string
-	TarPath       string
-	Timeout       *time.Duration
+	BuilderPath       string
+	BootstrapPath     string
+	BlobPath          string
+	BackendConfigPath string
+	TarPath           string
+	Timeout           *time.Duration
 }
 
 type outputJSON struct {
 	Blobs []string
 }
 
-func Pack(option PackOption) error {
+func buildPackArgs(option PackOption) []string {
 	if option.FsVersion == "" {
 	if option.FsVersion == "" {
-		option.FsVersion = "5"
+		option.FsVersion = "6"
 	}
 	}
 
 
 	args := []string{
 	args := []string{
@@ -72,14 +87,37 @@ func Pack(option PackOption) error {
 		"fs",
 		"fs",
 		"--blob",
 		"--blob",
 		option.BlobPath,
 		option.BlobPath,
-		"--source-type",
-		"directory",
 		"--whiteout-spec",
 		"--whiteout-spec",
 		"none",
 		"none",
 		"--fs-version",
 		"--fs-version",
 		option.FsVersion,
 		option.FsVersion,
-		"--inline-bootstrap",
 	}
+
+	if option.Features.Contains(FeatureTar2Rafs) {
+		args = append(
+			args,
+			"--type",
+			"tar-rafs",
+			"--blob-inline-meta",
+		)
+		if option.FsVersion == "6" {
+			args = append(
+				args,
+				"--features",
+				"blob-toc",
+			)
+		}
+	} else {
+		args = append(
+			args,
+			"--source-type",
+			"directory",
+			// Same as `--blob-inline-meta`; kept for compatibility
+			// with the old nydus-image builder.
+			"--inline-bootstrap",
+		)
+	}
+
 	if option.ChunkDictPath != "" {
 		args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath))
 	}
@@ -89,8 +127,25 @@ func Pack(option PackOption) error {
 	if option.Compressor != "" {
 		args = append(args, "--compressor", option.Compressor)
 	}
+	if option.AlignedChunk {
+		args = append(args, "--aligned-chunk")
+	}
+	if option.ChunkSize != "" {
+		args = append(args, "--chunk-size", option.ChunkSize)
+	}
+	if option.BatchSize != "" {
+		args = append(args, "--batch-size", option.BatchSize)
+	}
 	args = append(args, option.SourcePath)

+	return args
+}
+
+func Pack(option PackOption) error {
+	if option.OCIRef {
+		return packRef(option)
+	}
+
 	ctx := context.Background()
 	var cancel context.CancelFunc
 	if option.Timeout != nil {
@@ -98,7 +153,8 @@ func Pack(option PackOption) error {
 		defer cancel()
 	}

-	logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " "))
+	args := buildPackArgs(option)
+	logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " "))

 	cmd := exec.CommandContext(ctx, option.BuilderPath, args...)
 	cmd.Stdout = logger.Writer()
@@ -106,7 +162,47 @@ func Pack(option PackOption) error {
 	cmd.Stdin = strings.NewReader(option.PrefetchPatterns)

 	if err := cmd.Run(); err != nil {
-		if errdefs.IsSignalKilled(err) && option.Timeout != nil {
+		if isSignalKilled(err) && option.Timeout != nil {
+			logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout)
+		} else {
+			logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args)
+		}
+		return err
+	}
+
+	return nil
+}
+
+func packRef(option PackOption) error {
+	args := []string{
+		"create",
+		"--log-level",
+		"warn",
+		"--type",
+		"targz-ref",
+		"--blob-inline-meta",
+		"--features",
+		"blob-toc",
+		"--blob",
+		option.BlobPath,
+	}
+	args = append(args, option.SourcePath)
+
+	ctx := context.Background()
+	var cancel context.CancelFunc
+	if option.Timeout != nil {
+		ctx, cancel = context.WithTimeout(ctx, *option.Timeout)
+		defer cancel()
+	}
+
+	logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " "))
+
+	cmd := exec.CommandContext(ctx, option.BuilderPath, args...)
+	cmd.Stdout = logger.Writer()
+	cmd.Stderr = logger.Writer()
+
+	if err := cmd.Run(); err != nil {
+		if isSignalKilled(err) && option.Timeout != nil {
 			logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout)
 		} else {
 			logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args)
@@ -132,10 +228,26 @@ func Merge(option MergeOption) ([]digest.Digest, error) {
 	if option.ChunkDictPath != "" {
 		args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath))
 	}
+	if option.ParentBootstrapPath != "" {
+		args = append(args, "--parent-bootstrap", option.ParentBootstrapPath)
+	}
 	if option.PrefetchPatterns == "" {
 		option.PrefetchPatterns = "/"
 	}
 	args = append(args, option.SourceBootstrapPaths...)
+	if len(option.RafsBlobDigests) > 0 {
+		args = append(args, "--blob-digests", strings.Join(option.RafsBlobDigests, ","))
+	}
+	if len(option.RafsBlobTOCDigests) > 0 {
+		args = append(args, "--blob-toc-digests", strings.Join(option.RafsBlobTOCDigests, ","))
+	}
+	if len(option.RafsBlobSizes) > 0 {
+		sizes := []string{}
+		for _, size := range option.RafsBlobSizes {
+			sizes = append(sizes, fmt.Sprintf("%d", size))
+		}
+		args = append(args, "--blob-sizes", strings.Join(sizes, ","))
+	}

 	ctx := context.Background()
 	var cancel context.CancelFunc
@@ -143,7 +255,7 @@ func Merge(option MergeOption) ([]digest.Digest, error) {
 		ctx, cancel = context.WithTimeout(ctx, *option.Timeout)
 		defer cancel()
 	}
-	logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " "))
+	logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " "))

 	cmd := exec.CommandContext(ctx, option.BuilderPath, args...)
 	cmd.Stdout = logger.Writer()
@@ -151,7 +263,7 @@ func Merge(option MergeOption) ([]digest.Digest, error) {
 	cmd.Stdin = strings.NewReader(option.PrefetchPatterns)

 	if err := cmd.Run(); err != nil {
-		if errdefs.IsSignalKilled(err) && option.Timeout != nil {
+		if isSignalKilled(err) && option.Timeout != nil {
 			logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout)
 		} else {
 			logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args)
@@ -159,7 +271,7 @@ func Merge(option MergeOption) ([]digest.Digest, error) {
 		return nil, errors.Wrap(err, "run merge command")
 	}

-	outputBytes, err := ioutil.ReadFile(option.OutputJSONPath)
+	outputBytes, err := os.ReadFile(option.OutputJSONPath)
 	if err != nil {
 		return nil, errors.Wrapf(err, "read file %s", option.OutputJSONPath)
 	}
@@ -187,7 +299,10 @@ func Unpack(option UnpackOption) error {
 		"--output",
 		option.TarPath,
 	}
-	if option.BlobPath != "" {
+
+	if option.BackendConfigPath != "" {
+		args = append(args, "--backend-config", option.BackendConfigPath)
+	} else if option.BlobPath != "" {
 		args = append(args, "--blob", option.BlobPath)
 	}

@@ -198,14 +313,14 @@ func Unpack(option UnpackOption) error {
 		defer cancel()
 	}

-	logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " "))
+	logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " "))

 	cmd := exec.CommandContext(ctx, option.BuilderPath, args...)
 	cmd.Stdout = logger.Writer()
 	cmd.Stderr = logger.Writer()

 	if err := cmd.Run(); err != nil {
-		if errdefs.IsSignalKilled(err) && option.Timeout != nil {
+		if isSignalKilled(err) && option.Timeout != nil {
 			logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout)
 		} else {
 			logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args)

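A minimal sketch of how a caller might drive the Pack wrapper above; the binary location, file paths, and helper name are hypothetical:

	import (
		"time"

		"github.com/containerd/nydus-snapshotter/pkg/converter/tool"
	)

	// packExample converts an unpacked layer directory into a nydus blob.
	func packExample() error {
		timeout := 5 * time.Minute
		return tool.Pack(tool.PackOption{
			BuilderPath: "/usr/local/bin/nydus-image", // hypothetical binary location
			SourcePath:  "/tmp/rootfs",                // hypothetical unpacked layer directory
			BlobPath:    "/tmp/nydus-blob",            // hypothetical output blob path
			FsVersion:   "6",                          // RAFS v6, the new default above
			Timeout:     &timeout,                     // enforced via exec.CommandContext
		})
	}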
+ 113 - 0
vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/feature.go

@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2023. Nydus Developers. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package tool
+
+import (
+	"context"
+	"os"
+	"os/exec"
+	"regexp"
+	"sync"
+
+	"github.com/sirupsen/logrus"
+	"golang.org/x/mod/semver"
+)
+
+const envNydusDisableTar2Rafs = "NYDUS_DISABLE_TAR2RAFS"
+
+var currentVersion string
+var currentVersionDetectOnce sync.Once
+var disableTar2Rafs = os.Getenv(envNydusDisableTar2Rafs) != ""
+
+const (
+	// The option `--type tar-rafs` enables converting an OCI tar blob
+	// stream into a nydus blob directly; tar2rafs eliminates the need
+	// to decompress it to a local directory first, greatly accelerating
+	// the pack process.
+	FeatureTar2Rafs Feature = "--type tar-rafs"
+)
+
+var featureMap = map[Feature]string{
+	FeatureTar2Rafs: "v2.2",
+}
+
+type Feature string
+type Features []Feature
+
+func (features *Features) Contains(feature Feature) bool {
+	for _, feat := range *features {
+		if feat == feature {
+			return true
+		}
+	}
+	return false
+}
+
+func (features *Features) Remove(feature Feature) {
+	found := -1
+	for idx, feat := range *features {
+		if feat == feature {
+			found = idx
+			break
+		}
+	}
+	if found != -1 {
+		*features = append((*features)[:found], (*features)[found+1:]...)
+	}
+}
+
+func detectVersion(msg []byte) string {
+	re := regexp.MustCompile(`Version:\s*v*(\d+\.\d+\.\d+)`)
+	matches := re.FindSubmatch(msg)
+	if len(matches) > 1 {
+		return string(matches[1])
+	}
+	return ""
+}
+
+// DetectFeatures returns supported feature list from required feature list.
+func DetectFeatures(builder string, required Features) Features {
+	currentVersionDetectOnce.Do(func() {
+		if required.Contains(FeatureTar2Rafs) && disableTar2Rafs {
+			logrus.Warnf("the feature '%s' is disabled by env '%s'", FeatureTar2Rafs, envNydusDisableTar2Rafs)
+		}
+
+		cmd := exec.CommandContext(context.Background(), builder, "--version")
+		output, err := cmd.Output()
+		if err != nil {
+			return
+		}
+
+		currentVersion = detectVersion(output)
+	})
+
+	if currentVersion == "" {
+		return Features{}
+	}
+
+	detectedFeatures := Features{}
+	for _, feature := range required {
+		requiredVersion := featureMap[feature]
+		if requiredVersion == "" {
+			detectedFeatures = append(detectedFeatures, feature)
+			continue
+		}
+
+		// The feature is supported by the current version.
+		supported := semver.Compare(requiredVersion, "v"+currentVersion) <= 0
+		if supported {
+			// It is an experimental feature, so we still provide an env
+			// variable to allow users to disable it.
+			if feature == FeatureTar2Rafs && disableTar2Rafs {
+				continue
+			}
+			detectedFeatures = append(detectedFeatures, feature)
+		}
+	}
+
+	return detectedFeatures
+}

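The detection above runs the builder's `--version` once and caches the result; a sketch of the round trip a converter might perform, with the builder path hypothetical:

	required := tool.Features{tool.FeatureTar2Rafs}
	supported := tool.DetectFeatures("/usr/local/bin/nydus-image", required) // hypothetical path
	if supported.Contains(tool.FeatureTar2Rafs) {
		// Builder is v2.2 or newer and NYDUS_DISABLE_TAR2RAFS is unset, so the
		// pack path can stream the OCI tar directly via `--type tar-rafs`.
	}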
+ 98 - 5
vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go

@@ -8,24 +8,41 @@ package converter

 import (
 	"context"
+	"errors"
+	"fmt"
+	"strings"
 	"time"

 	"github.com/containerd/containerd/content"
+	"github.com/containerd/nydus-snapshotter/pkg/converter/tool"
 	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type Compressor = uint32
+
+const (
+	CompressorNone Compressor = 0x0001
+	CompressorZstd Compressor = 0x0002
+)
+
+var (
+	ErrNotFound = errors.New("data not found")
 )

 type Layer struct {
 	// Digest represents the hash of whole tar blob.
 	Digest digest.Digest
+	// OriginalDigest holds the digest of the original OCI tar(.gz) blob.
+	OriginalDigest *digest.Digest
 	// ReaderAt holds the reader of whole tar blob.
 	ReaderAt content.ReaderAt
 }

-// Backend uploads blobs generated by nydus-image builder to a backend storage such as:
-// - oss: A object storage backend, which uses its SDK to upload blob file.
+// Backend uploads blobs generated by nydus-image builder to a backend storage.
 type Backend interface {
 	// Push pushes specified blob file to remote storage backend.
-	Push(ctx context.Context, ra content.ReaderAt, blobDigest digest.Digest) error
+	Push(ctx context.Context, cs content.Store, desc ocispec.Descriptor) error
 	// Check checks whether a blob exists in remote storage backend,
 	// blob exists -> return (blobPath, nil)
 	// blob not exists -> return ("", err)
@@ -40,7 +57,7 @@ type PackOption struct {
 	// BuilderPath holds the path of `nydus-image` binary tool.
 	BuilderPath string
 	// FsVersion specifies nydus RAFS format version, possible
-	// values: `5`, `6` (EROFS-compatible), default is `5`.
+	// values: `5`, `6` (EROFS-compatible), default is `6`.
 	FsVersion string
 	// ChunkDictPath holds the bootstrap path of chunk dict image.
 	ChunkDictPath string
@@ -48,10 +65,22 @@ type PackOption struct {
 	PrefetchPatterns string
 	// Compressor specifies nydus blob compression algorithm.
 	Compressor string
+	// OCIRef enables converting OCI tar(.gz) blob to nydus referenced blob.
+	OCIRef bool
+	// AlignedChunk aligns uncompressed data chunks to 4K, only for RAFS V5.
+	AlignedChunk bool
+	// ChunkSize sets the size of data chunks; it must be a power of two, between 0x1000-0x1000000.
+	ChunkSize string
+	// BatchSize sets the size of batch data chunks; it must be a power of two, between 0x1000-0x1000000, or zero.
+	BatchSize string
 	// Backend uploads blobs generated by nydus-image builder to a backend storage.
 	Backend Backend
 	// Timeout cancels execution once exceed the specified time.
 	Timeout *time.Duration
+
+	// Features keeps a feature list supported by newer versions of the builder.
+	// It is detected automatically, so it is not exported.
+	features tool.Features
 }

 type MergeOption struct {
@@ -60,14 +89,20 @@ type MergeOption struct {
 	// BuilderPath holds the path of `nydus-image` binary tool.
 	BuilderPath string
 	// FsVersion specifies nydus RAFS format version, possible
-	// values: `5`, `6` (EROFS-compatible), default is `5`.
+	// values: `5`, `6` (EROFS-compatible), default is `6`.
 	FsVersion string
 	// ChunkDictPath holds the bootstrap path of chunk dict image.
 	ChunkDictPath string
+	// ParentBootstrapPath holds the bootstrap path of parent image.
+	ParentBootstrapPath string
 	// PrefetchPatterns holds file path pattern list want to prefetch.
 	PrefetchPatterns string
 	// WithTar puts bootstrap into a tar stream (no gzip).
 	WithTar bool
+	// OCI converts docker media types to OCI media types.
+	OCI bool
+	// OCIRef enables converting OCI tar(.gz) blob to nydus referenced blob.
+	OCIRef bool
 	// Backend uploads blobs generated by nydus-image builder to a backend storage.
 	Backend Backend
 	// Timeout cancels execution once exceed the specified time.
@@ -81,4 +116,62 @@ type UnpackOption struct {
 	BuilderPath string
 	// Timeout cancels execution once exceed the specified time.
 	Timeout *time.Duration
+	// Stream enables streaming mode, which doesn't unpack the blob data to disk
+	// but instead sets up an HTTP server to serve it.
+	Stream bool
+}
+
+type TOCEntry struct {
+	// Feature flags of entry
+	Flags     uint32
+	Reserved1 uint32
+	// Name of entry data
+	Name [16]byte
+	// Sha256 of uncompressed entry data
+	UncompressedDigest [32]byte
+	// Offset of compressed entry data
+	CompressedOffset uint64
+	// Size of compressed entry data
+	CompressedSize uint64
+	// Size of uncompressed entry data
+	UncompressedSize uint64
+	Reserved2        [44]byte
+}
+
+func (entry *TOCEntry) GetCompressor() (Compressor, error) {
+	switch {
+	case entry.Flags&CompressorNone == CompressorNone:
+		return CompressorNone, nil
+	case entry.Flags&CompressorZstd == CompressorZstd:
+		return CompressorZstd, nil
+	}
+	return 0, fmt.Errorf("unsupported compressor, entry flags %x", entry.Flags)
+}
+
+func (entry *TOCEntry) GetName() string {
+	var name strings.Builder
+	name.Grow(16)
+	for _, c := range entry.Name {
+		if c == 0 {
+			break
+		}
+		fmt.Fprintf(&name, "%c", c)
+	}
+	return name.String()
+}
+
+func (entry *TOCEntry) GetUncompressedDigest() string {
+	return fmt.Sprintf("%x", entry.UncompressedDigest)
+}
+
+func (entry *TOCEntry) GetCompressedOffset() uint64 {
+	return entry.CompressedOffset
+}
+
+func (entry *TOCEntry) GetCompressedSize() uint64 {
+	return entry.CompressedSize
+}
+
+func (entry *TOCEntry) GetUncompressedSize() uint64 {
+	return entry.UncompressedSize
 }

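TOCEntry is a fixed-layout 124-byte record (4+4+16+32+8+8+8+44), so one plausible way to decode it is encoding/binary; this is only a sketch, and the little-endian byte order is an assumption, not something this diff establishes:

	import (
		"bytes"
		"encoding/binary"

		"github.com/containerd/nydus-snapshotter/pkg/converter"
	)

	// decodeTOCEntry is a hypothetical helper; raw must hold one packed entry.
	func decodeTOCEntry(raw []byte) (*converter.TOCEntry, error) {
		var entry converter.TOCEntry
		if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &entry); err != nil {
			return nil, err
		}
		return &entry, nil
	}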
+ 13 - 7
vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go

@@ -59,18 +59,20 @@ type seekReader struct {

 func (ra *seekReader) Read(p []byte) (int, error) {
 	n, err := ra.ReaderAt.ReadAt(p, ra.pos)
-	ra.pos += int64(len(p))
+	ra.pos += int64(n)
 	return n, err
 }

 func (ra *seekReader) Seek(offset int64, whence int) (int64, error) {
-	if whence == io.SeekCurrent {
+	switch {
+	case whence == io.SeekCurrent:
 		ra.pos += offset
-	} else if whence == io.SeekStart {
+	case whence == io.SeekStart:
 		ra.pos = offset
-	} else {
+	default:
 		return 0, fmt.Errorf("unsupported whence %d", whence)
 	}
+
 	return ra.pos, nil
 }

@@ -126,11 +128,12 @@ func packToTar(src string, name string, compress bool) (io.ReadCloser, error) {
 			var finalErr error

 			// Return the first error encountered to the other end and ignore others.
-			if err != nil {
+			switch {
+			case err != nil:
 				finalErr = err
-			} else if err1 != nil {
+			case err1 != nil:
 				finalErr = err1
-			} else if err2 != nil {
+			case err2 != nil:
 				finalErr = err2
 			}

@@ -168,6 +171,9 @@ func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec
 		return nil, err
 	}
 	labels := info.Labels
+	if labels == nil {
+		labels = map[string]string{}
+	}
 	b, err := content.ReadBlob(ctx, cs, desc)
 	if err != nil {
 		return nil, err

+ 7 - 10
vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go

@@ -9,17 +9,19 @@ package errdefs
 import (
 	stderrors "errors"
 	"net"
-	"strings"
 	"syscall"

+	"github.com/containerd/containerd/errdefs"
 	"github.com/pkg/errors"
 )

-const signalKilled = "signal: killed"
-
 var (
-	ErrAlreadyExists = errors.New("already exists")
-	ErrNotFound      = errors.New("not found")
+	ErrAlreadyExists   = errdefs.ErrAlreadyExists
+	ErrNotFound        = errdefs.ErrNotFound
+	ErrInvalidArgument = errors.New("invalid argument")
+	ErrUnavailable     = errors.New("unavailable")
+	ErrNotImplemented  = errors.New("not implemented") // represents not supported and unimplemented
+	ErrDeviceBusy      = errors.New("device busy")     // represents a device is busy
 )

 // IsAlreadyExists returns true if the error is due to already exists
@@ -32,11 +34,6 @@ func IsNotFound(err error) bool {
 	return errors.Is(err, ErrNotFound)
 }

-// IsSignalKilled returns true if the error is signal killed
-func IsSignalKilled(err error) bool {
-	return strings.Contains(err.Error(), signalKilled)
-}
-
 // IsConnectionClosed returns true if error is due to connection closed
 // this is used when snapshotter closed by sig term
 func IsConnectionClosed(err error) bool {

+ 64 - 0
vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go

@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2020. Ant Group. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package label
+
+import (
+	snpkg "github.com/containerd/containerd/pkg/snapshotters"
+)
+
+// For package compatibility, we still keep the old exported name here.
+var AppendLabelsHandlerWrapper = snpkg.AppendInfoHandlerWrapper
+
+// For package compatibility, we still keep the old exported name here.
+const (
+	CRIImageRef       = snpkg.TargetRefLabel
+	CRIImageLayers    = snpkg.TargetImageLayersLabel
+	CRILayerDigest    = snpkg.TargetLayerDigestLabel
+	CRIManifestDigest = snpkg.TargetManifestDigestLabel
+)
+
+const (
+	// Marker for remote snapshotter to handle the pull request.
+	// During image pull, the containerd client calls Prepare API with the label containerd.io/snapshot.ref.
+	// This is a containerd-defined label which contains ChainID that targets a committed snapshot that the
+	// client is trying to prepare.
+	TargetSnapshotRef = "containerd.io/snapshot.ref"
+
+	// A bool flag to mark the blob as a Nydus data blob, set by image builders.
+	NydusDataLayer = "containerd.io/snapshot/nydus-blob"
+	// A bool flag to mark the blob as a nydus bootstrap, set by image builders.
+	NydusMetaLayer = "containerd.io/snapshot/nydus-bootstrap"
+	// The referenced blob sha256 in format of `sha256:xxx`, set by image builders.
+	NydusRefLayer = "containerd.io/snapshot/nydus-ref"
+	// Annotation containing secret to pull images from registry, set by the snapshotter.
+	NydusImagePullSecret = "containerd.io/snapshot/pullsecret"
+	// Annotation containing username to pull images from registry, set by the snapshotter.
+	NydusImagePullUsername = "containerd.io/snapshot/pullusername"
+	// A bool flag to enable integrity verification of the metadata blob
+	NydusSignature = "containerd.io/snapshot/nydus-signature"
+
+	// A bool flag to mark the blob as an estargz data blob, set by the snapshotter.
+	StargzLayer = "containerd.io/snapshot/stargz"
+
+	// OverlayfsVolatileOpt is the key of an optional label for each snapshot.
+	// If this label is set on a snapshot, the snapshot is mounted to rootdir
+	// with the overlayfs volatile option.
+	OverlayfsVolatileOpt = "containerd.io/snapshot/overlay.volatile"
+)
+
+func IsNydusDataLayer(labels map[string]string) bool {
+	_, ok := labels[NydusDataLayer]
+	return ok
+}
+
+func IsNydusMetaLayer(labels map[string]string) bool {
+	if labels == nil {
+		return false
+	}
+	_, ok := labels[NydusMetaLayer]
+	return ok
+}

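A short sketch of how these helpers might classify a layer from its labels; layerKind is a hypothetical helper, not part of the package:

	import "github.com/containerd/nydus-snapshotter/pkg/label"

	// layerKind inspects snapshot labels and names the layer type.
	func layerKind(labels map[string]string) string {
		switch {
		case label.IsNydusMetaLayer(labels):
			return "nydus bootstrap"
		case label.IsNydusDataLayer(labels):
			return "nydus data blob"
		default:
			return "regular OCI layer"
		}
	}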
+ 0 - 2
vendor/github.com/containerd/typeurl/.gitignore

@@ -1,2 +0,0 @@
-*.test
-coverage.txt

+ 0 - 20
vendor/github.com/containerd/typeurl/README.md

@@ -1,20 +0,0 @@
-# typeurl
-
-[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl)
-[![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI)
-[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl)
-[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl)
-
-A Go package for managing the registration, marshaling, and unmarshaling of encoded types.
-
-This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto).
-
-## Project details
-
-**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
-As a containerd sub-project, you will find the:
- * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
- * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
- * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
-
-information in our [`containerd/project`](https://github.com/containerd/project) repository.

+ 0 - 83
vendor/github.com/containerd/typeurl/doc.go

@@ -1,83 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package typeurl
-
-// Package typeurl assists with managing the registration, marshaling, and
-// unmarshaling of types encoded as protobuf.Any.
-//
-// A protobuf.Any is a proto message that can contain any arbitrary data. It
-// consists of two components, a TypeUrl and a Value, and its proto definition
-// looks like this:
-//
-//   message Any {
-//     string type_url = 1;
-//     bytes value = 2;
-//   }
-//
-// The TypeUrl is used to distinguish the contents from other proto.Any
-// messages. This typeurl library manages these URLs to enable automagic
-// marshaling and unmarshaling of the contents.
-//
-// For example, consider this go struct:
-//
-//   type Foo struct {
-//     Field1 string
-//     Field2 string
-//   }
-//
-// To use typeurl, types must first be registered. This is typically done in
-// the init function
-//
-//   func init() {
-//      typeurl.Register(&Foo{}, "Foo")
-//   }
-//
-// This will register the type Foo with the url path "Foo". The arguments to
-// Register are variadic, and are used to construct a url path. Consider this
-// example, from the github.com/containerd/containerd/client package:
-//
-//   func init() {
-//     const prefix = "types.containerd.io"
-//     // register TypeUrls for commonly marshaled external types
-//     major := strconv.Itoa(specs.VersionMajor)
-//     typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
-//     // this function has more Register calls, which are elided.
-//   }
-//
-// This registers several types under a more complex url, which ends up mapping
-// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other
-// value for major).
-//
-// Once a type is registered, it can be marshaled to a proto.Any message simply
-// by calling `MarshalAny`, like this:
-//
-//   foo := &Foo{Field1: "value1", Field2: "value2"}
-//   anyFoo, err := typeurl.MarshalAny(foo)
-//
-// MarshalAny will resolve the correct URL for the type. If the type in
-// question implements the proto.Message interface, then it will be marshaled
-// as a proto message. Otherwise, it will be marshaled as json. This means that
-// typeurl will work on any arbitrary data, whether or not it has a proto
-// definition, as long as it can be serialized to json.
-//
-// To unmarshal, the process is simply inverse:
-//
-//   iface, err := typeurl.UnmarshalAny(anyFoo)
-//   foo := iface.(*Foo)
-//
-// The correct type is automatically chosen from the type registry, and the
-// returned interface can be cast straight to that type.

+ 0 - 214
vendor/github.com/containerd/typeurl/types.go

@@ -1,214 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package typeurl
-
-import (
-	"encoding/json"
-	"path"
-	"reflect"
-	"sync"
-
-	"github.com/gogo/protobuf/proto"
-	"github.com/gogo/protobuf/types"
-	"github.com/pkg/errors"
-)
-
-var (
-	mu       sync.RWMutex
-	registry = make(map[reflect.Type]string)
-)
-
-// Definitions of common error types used throughout typeurl.
-//
-// These error types are used with errors.Wrap and errors.Wrapf to add context
-// to an error.
-//
-// To detect an error class, use errors.Is() functions to tell whether an
-// error is of this type.
-var (
-	ErrNotFound = errors.New("not found")
-)
-
-// Register a type with a base URL for JSON marshaling. When the MarshalAny and
-// UnmarshalAny functions are called they will treat the Any type value as JSON.
-// To use protocol buffers for handling the Any value the proto.Register
-// function should be used instead of this function.
-func Register(v interface{}, args ...string) {
-	var (
-		t = tryDereference(v)
-		p = path.Join(args...)
-	)
-	mu.Lock()
-	defer mu.Unlock()
-	if et, ok := registry[t]; ok {
-		if et != p {
-			panic(errors.Errorf("type registered with alternate path %q != %q", et, p))
-		}
-		return
-	}
-	registry[t] = p
-}
-
-// TypeURL returns the type url for a registered type.
-func TypeURL(v interface{}) (string, error) {
-	mu.RLock()
-	u, ok := registry[tryDereference(v)]
-	mu.RUnlock()
-	if !ok {
-		// fallback to the proto registry if it is a proto message
-		pb, ok := v.(proto.Message)
-		if !ok {
-			return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v))
-		}
-		return proto.MessageName(pb), nil
-	}
-	return u, nil
-}
-
-// Is returns true if the type of the Any is the same as v.
-func Is(any *types.Any, v interface{}) bool {
-	// call to check that v is a pointer
-	tryDereference(v)
-	url, err := TypeURL(v)
-	if err != nil {
-		return false
-	}
-	return any.TypeUrl == url
-}
-
-// MarshalAny marshals the value v into an any with the correct TypeUrl.
-// If the provided object is already a proto.Any message, then it will be
-// returned verbatim. If it is of type proto.Message, it will be marshaled as a
-// protocol buffer. Otherwise, the object will be marshaled to json.
-func MarshalAny(v interface{}) (*types.Any, error) {
-	var marshal func(v interface{}) ([]byte, error)
-	switch t := v.(type) {
-	case *types.Any:
-		// avoid reserializing the type if we have an any.
-		return t, nil
-	case proto.Message:
-		marshal = func(v interface{}) ([]byte, error) {
-			return proto.Marshal(t)
-		}
-	default:
-		marshal = json.Marshal
-	}
-
-	url, err := TypeURL(v)
-	if err != nil {
-		return nil, err
-	}
-
-	data, err := marshal(v)
-	if err != nil {
-		return nil, err
-	}
-	return &types.Any{
-		TypeUrl: url,
-		Value:   data,
-	}, nil
-}
-
-// UnmarshalAny unmarshals the any type into a concrete type.
-func UnmarshalAny(any *types.Any) (interface{}, error) {
-	return UnmarshalByTypeURL(any.TypeUrl, any.Value)
-}
-
-// UnmarshalByTypeURL unmarshals the given type and value to into a concrete type.
-func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) {
-	return unmarshal(typeURL, value, nil)
-}
-
-// UnmarshalTo unmarshals the any type into a concrete type passed in the out
-// argument. It is identical to UnmarshalAny, but lets clients provide a
-// destination type through the out argument.
-func UnmarshalTo(any *types.Any, out interface{}) error {
-	return UnmarshalToByTypeURL(any.TypeUrl, any.Value, out)
-}
-
-// UnmarshalTo unmarshals the given type and value into a concrete type passed
-// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients
-// provide a destination type through the out argument.
-func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
-	_, err := unmarshal(typeURL, value, out)
-	return err
-}
-
-func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
-	t, err := getTypeByUrl(typeURL)
-	if err != nil {
-		return nil, err
-	}
-
-	if v == nil {
-		v = reflect.New(t.t).Interface()
-	} else {
-		// Validate interface type provided by client
-		vURL, err := TypeURL(v)
-		if err != nil {
-			return nil, err
-		}
-		if typeURL != vURL {
-			return nil, errors.Errorf("can't unmarshal type %q to output %q", typeURL, vURL)
-		}
-	}
-
-	if t.isProto {
-		err = proto.Unmarshal(value, v.(proto.Message))
-	} else {
-		err = json.Unmarshal(value, v)
-	}
-
-	return v, err
-}
-
-type urlType struct {
-	t       reflect.Type
-	isProto bool
-}
-
-func getTypeByUrl(url string) (urlType, error) {
-	mu.RLock()
-	for t, u := range registry {
-		if u == url {
-			mu.RUnlock()
-			return urlType{
-				t: t,
-			}, nil
-		}
-	}
-	mu.RUnlock()
-	// fallback to proto registry
-	t := proto.MessageType(url)
-	if t != nil {
-		return urlType{
-			// get the underlying Elem because proto returns a pointer to the type
-			t:       t.Elem(),
-			isProto: true,
-		}, nil
-	}
-	return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url)
-}
-
-func tryDereference(v interface{}) reflect.Type {
-	t := reflect.TypeOf(v)
-	if t.Kind() == reflect.Ptr {
-		// require check of pointer but dereference to register
-		return t.Elem()
-	}
-	panic("v is not a pointer to a type")
-}

+ 32 - 31
vendor/github.com/moby/buildkit/cache/blobs.go

@@ -11,6 +11,7 @@ import (
 	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/mount"
 	"github.com/moby/buildkit/session"
+	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/compression"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/moby/buildkit/util/winlayers"
@@ -18,11 +19,11 @@ import (
 	imagespecidentity "github.com/opencontainers/image-spec/identity"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
 )

-var g flightcontrol.Group
+var g flightcontrol.Group[struct{}]
+var gFileList flightcontrol.Group[[]string]

 const containerdUncompressed = "containerd.io/uncompressed"

@@ -86,12 +87,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool

 	if _, ok := filter[sr.ID()]; ok {
 		eg.Go(func() error {
-			_, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (interface{}, error) {
+			_, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (struct{}, error) {
 				if sr.getBlob() != "" {
-					return nil, nil
+					return struct{}{}, nil
 				}
 				if !createIfNeeded {
-					return nil, errors.WithStack(ErrNoBlobs)
+					return struct{}{}, errors.WithStack(ErrNoBlobs)
 				}

 				compressorFunc, finalize := comp.Type.Compress(ctx, comp)
@@ -108,12 +109,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				if lowerRef != nil {
 					m, err := lowerRef.Mount(ctx, true, s)
 					if err != nil {
-						return nil, err
+						return struct{}{}, err
 					}
 					var release func() error
 					lower, release, err = m.Mount()
 					if err != nil {
-						return nil, err
+						return struct{}{}, err
 					}
 					if release != nil {
 						defer release()
@@ -131,12 +132,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				if upperRef != nil {
 					m, err := upperRef.Mount(ctx, true, s)
 					if err != nil {
-						return nil, err
+						return struct{}{}, err
 					}
 					var release func() error
 					upper, release, err = m.Mount()
 					if err != nil {
-						return nil, err
+						return struct{}{}, err
 					}
 					if release != nil {
 						defer release()
@@ -151,7 +152,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				if forceOvlStr := os.Getenv("BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF"); forceOvlStr != "" && sr.kind() != Diff {
 					enableOverlay, err = strconv.ParseBool(forceOvlStr)
 					if err != nil {
-						return nil, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF")
+						return struct{}{}, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF")
 					}
 					fallback = false // prohibit fallback on debug
 				} else if !isTypeWindows(sr) {
@@ -173,14 +174,14 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 					if !ok || err != nil {
 						if !fallback {
 							if !ok {
-								return nil, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper)
+								return struct{}{}, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper)
 							}
 							if err != nil {
-								return nil, errors.Wrapf(err, "failed to compute overlay diff")
+								return struct{}{}, errors.Wrapf(err, "failed to compute overlay diff")
 							}
 						}
 						if logWarnOnErr {
-							logrus.Warnf("failed to compute blob by overlay differ (ok=%v): %v", ok, err)
+							bklog.G(ctx).Warnf("failed to compute blob by overlay differ (ok=%v): %v", ok, err)
 						}
 					}
 					if ok {
@@ -198,7 +199,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 						diff.WithCompressor(compressorFunc),
 					)
 					if err != nil {
-						logrus.WithError(err).Warnf("failed to compute blob by buildkit differ")
+						bklog.G(ctx).WithError(err).Warnf("failed to compute blob by buildkit differ")
 					}
 				}

@@ -209,7 +210,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 						diff.WithCompressor(compressorFunc),
 					)
 					if err != nil {
-						return nil, err
+						return struct{}{}, err
 					}
 				}

@@ -219,7 +220,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				if finalize != nil {
 					a, err := finalize(ctx, sr.cm.ContentStore)
 					if err != nil {
-						return nil, errors.Wrapf(err, "failed to finalize compression")
+						return struct{}{}, errors.Wrapf(err, "failed to finalize compression")
 					}
 					for k, v := range a {
 						desc.Annotations[k] = v
@@ -227,7 +228,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				}
 				info, err := sr.cm.ContentStore.Info(ctx, desc.Digest)
 				if err != nil {
-					return nil, err
+					return struct{}{}, err
 				}

 				if diffID, ok := info.Labels[containerdUncompressed]; ok {
@@ -235,13 +236,13 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
 				} else if mediaType == ocispecs.MediaTypeImageLayer {
 					desc.Annotations[containerdUncompressed] = desc.Digest.String()
 				} else {
-					return nil, errors.Errorf("unknown layer compression type")
+					return struct{}{}, errors.Errorf("unknown layer compression type")
 				}

 				if err := sr.setBlob(ctx, desc); err != nil {
-					return nil, err
+					return struct{}{}, err
 				}
-				return nil, nil
+				return struct{}{}, nil
 			})
 			if err != nil {
 				return err
@@ -415,29 +416,29 @@ func isTypeWindows(sr *immutableRef) bool {

 // ensureCompression ensures the specified ref has the blob of the specified compression Type.
 func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error {
-	_, err := g.Do(ctx, fmt.Sprintf("%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) {
+	_, err := g.Do(ctx, fmt.Sprintf("ensureComp-%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (struct{}, error) {
 		desc, err := ref.ociDesc(ctx, ref.descHandlers, true)
 		if err != nil {
-			return nil, err
+			return struct{}{}, err
 		}

 		// Resolve converters
 		layerConvertFunc, err := getConverter(ctx, ref.cm.ContentStore, desc, comp)
 		if err != nil {
-			return nil, err
+			return struct{}{}, err
 		} else if layerConvertFunc == nil {
 			if isLazy, err := ref.isLazy(ctx); err != nil {
-				return nil, err
+				return struct{}{}, err
 			} else if isLazy {
 				// This ref can be used as the specified compressionType. Keep it lazy.
-				return nil, nil
+				return struct{}{}, nil
 			}
-			return nil, ref.linkBlob(ctx, desc)
+			return struct{}{}, ref.linkBlob(ctx, desc)
 		}

 		// First, lookup local content store
 		if _, err := ref.getBlobWithCompression(ctx, comp.Type); err == nil {
-			return nil, nil // found the compression variant. no need to convert.
+			return struct{}{}, nil // found the compression variant. no need to convert.
 		}

 		// Convert layer compression type
@@ -447,18 +448,18 @@ func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.
 			dh:      ref.descHandlers[desc.Digest],
 			session: s,
 		}).Unlazy(ctx); err != nil {
-			return nil, err
+			return struct{}{}, err
 		}
 		newDesc, err := layerConvertFunc(ctx, ref.cm.ContentStore, desc)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to convert")
+			return struct{}{}, errors.Wrapf(err, "failed to convert")
 		}

 		// Start to track converted layer
 		if err := ref.linkBlob(ctx, *newDesc); err != nil {
-			return nil, errors.Wrapf(err, "failed to add compression blob")
+			return struct{}{}, errors.Wrapf(err, "failed to add compression blob")
 		}
-		return nil, nil
+		return struct{}{}, nil
 	})
 	return err
 }

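The moves from flightcontrol.Group to the generic flightcontrol.Group[T] throughout this file drop the interface{} round trip and type assertions; a minimal sketch of the new call shape, where the key and the computeSize helper are hypothetical:

	var sizes flightcontrol.Group[int64]

	func cachedSize(ctx context.Context, id string) (int64, error) {
		// Concurrent callers with the same key share one execution and its result.
		return sizes.Do(ctx, "size-"+id, func(ctx context.Context) (int64, error) {
			return computeSize(ctx, id) // hypothetical expensive probe
		})
	}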
+ 6 - 14
vendor/github.com/moby/buildkit/cache/compression_nydus.go

@@ -6,7 +6,6 @@ package cache
 import (
 	"compress/gzip"
 	"context"
-	"encoding/json"
 	"io"

 	"github.com/containerd/containerd/content"
@@ -18,13 +17,13 @@ import (
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"

-	nydusify "github.com/containerd/nydus-snapshotter/pkg/converter"
+	"github.com/containerd/nydus-snapshotter/pkg/converter"
 )

 func init() {
 	additionalAnnotations = append(
 		additionalAnnotations,
-		nydusify.LayerAnnotationNydusBlob, nydusify.LayerAnnotationNydusBootstrap, nydusify.LayerAnnotationNydusBlobIDs,
+		converter.LayerAnnotationNydusBlob, converter.LayerAnnotationNydusBootstrap,
 	)
 }

@@ -58,7 +57,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,

 	// Extracts nydus bootstrap from nydus format for each layer.
 	var cm *cacheManager
-	layers := []nydusify.Layer{}
+	layers := []converter.Layer{}
 	blobIDs := []string{}
 	for _, ref := range refs {
 		blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s)
@@ -74,7 +73,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
 			cm = ref.cm
 		}
 		blobIDs = append(blobIDs, blobDesc.Digest.Hex())
-		layers = append(layers, nydusify.Layer{
+		layers = append(layers, converter.Layer{
 			Digest:   blobDesc.Digest,
 			ReaderAt: ra,
 		})
@@ -84,7 +83,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
 	pr, pw := io.Pipe()
 	go func() {
 		defer pw.Close()
-		if _, err := nydusify.Merge(ctx, layers, pw, nydusify.MergeOption{
+		if _, err := converter.Merge(ctx, layers, pw, converter.MergeOption{
 			WithTar: true,
 		}); err != nil {
 			pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap"))
@@ -125,11 +124,6 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
 		return nil, errors.Wrap(err, "get info from content store")
 	}

-	blobIDsBytes, err := json.Marshal(blobIDs)
-	if err != nil {
-		return nil, errors.Wrap(err, "marshal blob ids")
-	}
-
 	desc := ocispecs.Descriptor{
 		Digest:    compressedDgst,
 		Size:      info.Size,
@@ -137,9 +131,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config,
 		Annotations: map[string]string{
 			containerdUncompressed: uncompressedDgst.Digest().String(),
 			// Use this annotation to identify nydus bootstrap layer.
-			nydusify.LayerAnnotationNydusBootstrap: "true",
-			// Track all blob digests for nydus snapshotter.
-			nydusify.LayerAnnotationNydusBlobIDs: string(blobIDsBytes),
+			converter.LayerAnnotationNydusBootstrap: "true",
 		},
 	}


+ 2 - 0
vendor/github.com/moby/buildkit/cache/contenthash/checksum.go

@@ -110,7 +110,9 @@ func (cm *cacheManager) GetCacheContext(ctx context.Context, md cache.RefMetadat
 	cm.lruMu.Unlock()
 	if ok {
 		cm.locker.Unlock(md.ID())
+		v.(*cacheContext).mu.Lock() // locking is required because multiple ImmutableRefs can reach this code; however none of them use the linkMap.
 		v.(*cacheContext).linkMap = map[string][][]byte{}
+		v.(*cacheContext).mu.Unlock()
 		return v.(*cacheContext), nil
 	}
 	cc, err := newCacheContext(md)

+ 1 - 8
vendor/github.com/moby/buildkit/cache/filelist.go

@@ -20,7 +20,7 @@ const keyFileList = "filelist"
 // are in the tar stream (AUFS whiteout format). If the reference does not have a
 // a blob associated with it, the list is empty.
 func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string, error) {
-	res, err := g.Do(ctx, fmt.Sprintf("filelist-%s", sr.ID()), func(ctx context.Context) (interface{}, error) {
+	return gFileList.Do(ctx, fmt.Sprintf("filelist-%s", sr.ID()), func(ctx context.Context) ([]string, error) {
 		dt, err := sr.GetExternal(keyFileList)
 		if err == nil && dt != nil {
 			var files []string
@@ -80,11 +80,4 @@ func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string
 		}
 		return files, nil
 	})
-	if err != nil {
-		return nil, err
-	}
-	if res == nil {
-		return nil, nil
-	}
-	return res.([]string), nil
 }

+ 6 - 6
vendor/github.com/moby/buildkit/cache/manager.go

@@ -27,7 +27,6 @@ import (
 	imagespecidentity "github.com/opencontainers/image-spec/identity"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
 )

@@ -94,7 +93,7 @@ type cacheManager struct {
 	mountPool sharableMountPool

 	muPrune sync.Mutex // make sure parallel prune is not allowed so there will not be inconsistent results
-	unlazyG flightcontrol.Group
+	unlazyG flightcontrol.Group[struct{}]
 }

 func NewManager(opt ManagerOpt) (Manager, error) {
@@ -243,7 +242,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,
 			if err := cm.LeaseManager.Delete(context.TODO(), leases.Lease{
 				ID: l.ID,
 			}); err != nil {
-				logrus.Errorf("failed to remove lease: %+v", err)
+				bklog.G(ctx).Errorf("failed to remove lease: %+v", err)
 			}
 		}
 	}()
@@ -319,7 +318,7 @@ func (cm *cacheManager) init(ctx context.Context) error {

 	for _, si := range items {
 		if _, err := cm.getRecord(ctx, si.ID()); err != nil {
-			logrus.Debugf("could not load snapshot %s: %+v", si.ID(), err)
+			bklog.G(ctx).Debugf("could not load snapshot %s: %+v", si.ID(), err)
 			cm.MetadataStore.Clear(si.ID())
 			cm.LeaseManager.Delete(ctx, leases.Lease{ID: si.ID()})
 		}
@@ -597,7 +596,7 @@ func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, sess session.Gr
 			if err := cm.LeaseManager.Delete(context.TODO(), leases.Lease{
 				ID: l.ID,
 			}); err != nil {
-				logrus.Errorf("failed to remove lease: %+v", err)
+				bklog.G(ctx).Errorf("failed to remove lease: %+v", err)
 			}
 		}
 	}()
@@ -1426,12 +1425,13 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo)
 						d.Size = 0
 						return nil
 					}
+					defer ref.Release(context.TODO())
 					s, err := ref.size(ctx)
 					if err != nil {
 						return err
 					}
 					d.Size = s
-					return ref.Release(context.TODO())
+					return nil
 				})
 			}(d)
 		}

+ 1 - 1
vendor/github.com/moby/buildkit/cache/metadata.go

@@ -87,7 +87,7 @@ func (cm *cacheManager) Search(ctx context.Context, idx string) ([]RefMetadata,

 // callers must hold cm.mu lock
 func (cm *cacheManager) search(ctx context.Context, idx string) ([]RefMetadata, error) {
-	sis, err := cm.MetadataStore.Search(idx)
+	sis, err := cm.MetadataStore.Search(ctx, idx)
 	if err != nil {
 		return nil, err
 	}

+ 4 - 3
vendor/github.com/moby/buildkit/cache/metadata/metadata.go

@@ -2,12 +2,13 @@ package metadata

 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"strings"
 	"sync"

+	"github.com/moby/buildkit/util/bklog"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	bolt "go.etcd.io/bbolt"
 )

@@ -80,7 +81,7 @@ func (s *Store) Probe(index string) (bool, error) {
 	return exists, errors.WithStack(err)
 }

-func (s *Store) Search(index string) ([]*StorageItem, error) {
+func (s *Store) Search(ctx context.Context, index string) ([]*StorageItem, error) {
 	var out []*StorageItem
 	err := s.db.View(func(tx *bolt.Tx) error {
 		b := tx.Bucket([]byte(indexBucket))
@@ -100,7 +101,7 @@ func (s *Store) Search(index string) ([]*StorageItem, error) {
 				k, _ = c.Next()
 				b := main.Bucket([]byte(itemID))
 				if b == nil {
-					logrus.Errorf("index pointing to missing record %s", itemID)
+					bklog.G(ctx).Errorf("index pointing to missing record %s", itemID)
 					continue
 				}
 				si, err := newStorageItem(itemID, b, s)

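Most hunks in these cache files make the same mechanical substitution: package-global `logrus.*` calls become `bklog.G(ctx).*`, which pulls the logger (and any per-build fields attached to it) out of the request context. A hedged before/after sketch, assuming the vendored `util/bklog` package's `G` helper returns a logrus-style entry as it does here:

```go
package main

import (
	"context"

	"github.com/moby/buildkit/util/bklog"
	"github.com/sirupsen/logrus"
)

func before(id string, err error) {
	// Old style: one process-global logger, no tie-in to the request.
	logrus.Debugf("could not load snapshot %s: %+v", id, err)
}

func after(ctx context.Context, id string, err error) {
	// New style: bklog.G extracts the logger stored in ctx (falling back
	// to a default), so log lines inherit request-scoped fields.
	bklog.G(ctx).Debugf("could not load snapshot %s: %+v", id, err)
}

func main() {
	before("sha256:abc", nil)
	after(context.Background(), "sha256:abc", nil)
}
```

This is also why several signatures in this diff (Search, normalize, removeLoops) grow a `ctx context.Context` parameter: the context has to reach the call site before it can carry the logger.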
+ 92 - 30
vendor/github.com/moby/buildkit/cache/refs.go

@@ -27,6 +27,7 @@ import (
 	"github.com/moby/buildkit/util/compression"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/moby/buildkit/util/leaseutil"
+	"github.com/moby/buildkit/util/overlay"
 	"github.com/moby/buildkit/util/progress"
 	rootlessmountopts "github.com/moby/buildkit/util/rootless/mountopts"
 	"github.com/moby/buildkit/util/winlayers"
@@ -89,7 +90,7 @@ type cacheRecord struct {
 
 	mountCache snapshot.Mountable
 
-	sizeG flightcontrol.Group
+	sizeG flightcontrol.Group[int64]
 
 	// these are filled if multiple refs point to same data
 	equalMutable   *mutableRef
@@ -107,6 +108,7 @@ func (cr *cacheRecord) ref(triggerLastUsed bool, descHandlers DescHandlers, pg p
 		progress:        pg,
 	}
 	cr.refs[ref] = struct{}{}
+	bklog.G(context.TODO()).WithFields(ref.traceLogFields()).Trace("acquired cache ref")
 	return ref
 }
 
@@ -118,6 +120,7 @@ func (cr *cacheRecord) mref(triggerLastUsed bool, descHandlers DescHandlers) *mu
 		descHandlers:    descHandlers,
 	}
 	cr.refs[ref] = struct{}{}
+	bklog.G(context.TODO()).WithFields(ref.traceLogFields()).Trace("acquired cache ref")
 	return ref
 }
 
@@ -322,7 +325,7 @@ func (cr *cacheRecord) viewSnapshotID() string {
 
 func (cr *cacheRecord) size(ctx context.Context) (int64, error) {
 	// this expects that usage() is implemented lazily
-	s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) {
+	return cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (int64, error) {
 		cr.mu.Lock()
 		s := cr.getSize()
 		if s != sizeUnknown {
@@ -343,7 +346,7 @@ func (cr *cacheRecord) size(ctx context.Context) (int64, error) {
 				isDead := cr.isDead()
 				cr.mu.Unlock()
 				if isDead {
-					return int64(0), nil
+					return 0, nil
 				}
 				if !errors.Is(err, errdefs.ErrNotFound) {
 					return s, errors.Wrapf(err, "failed to get usage for %s", cr.ID())
@@ -376,10 +379,6 @@ func (cr *cacheRecord) size(ctx context.Context) (int64, error) {
 		cr.mu.Unlock()
 		return usage.Size, nil
 	})
-	if err != nil {
-		return 0, err
-	}
-	return s.(int64), nil
 }
 
 // caller must hold cr.mu
@@ -438,7 +437,19 @@ func (cr *cacheRecord) mount(ctx context.Context, s session.Group) (_ snapshot.M
 }
 
 // call when holding the manager lock
-func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error {
+func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) (rerr error) {
+	defer func() {
+		l := bklog.G(ctx).WithFields(map[string]any{
+			"id":             cr.ID(),
+			"refCount":       len(cr.refs),
+			"removeSnapshot": removeSnapshot,
+			"stack":          bklog.LazyStackTrace{},
+		})
+		if rerr != nil {
+			l = l.WithError(rerr)
+		}
+		l.Trace("removed cache record")
+	}()
 	delete(cr.cm.records, cr.ID())
 	if removeSnapshot {
 		if err := cr.cm.LeaseManager.Delete(ctx, leases.Lease{
@@ -469,6 +480,24 @@ type immutableRef struct {
 	progress progress.Controller
 }
 
+// hold ref lock before calling
+func (sr *immutableRef) traceLogFields() logrus.Fields {
+	m := map[string]any{
+		"id":          sr.ID(),
+		"refID":       fmt.Sprintf("%p", sr),
+		"newRefCount": len(sr.refs),
+		"mutable":     false,
+		"stack":       bklog.LazyStackTrace{},
+	}
+	if sr.equalMutable != nil {
+		m["equalMutableID"] = sr.equalMutable.ID()
+	}
+	if sr.equalImmutable != nil {
+		m["equalImmutableID"] = sr.equalImmutable.ID()
+	}
+	return m
+}
+
 // Order is from parent->child, sr will be at end of slice. Refs should not
 // be released as they are used internally in the underlying cacheRecords.
 func (sr *immutableRef) layerChain() []*immutableRef {
@@ -591,6 +620,24 @@ type mutableRef struct {
 	descHandlers    DescHandlers
 }
 
+// hold ref lock before calling
+func (sr *mutableRef) traceLogFields() logrus.Fields {
+	m := map[string]any{
+		"id":          sr.ID(),
+		"refID":       fmt.Sprintf("%p", sr),
+		"newRefCount": len(sr.refs),
+		"mutable":     true,
+		"stack":       bklog.LazyStackTrace{},
+	}
+	if sr.equalMutable != nil {
+		m["equalMutableID"] = sr.equalMutable.ID()
+	}
+	if sr.equalImmutable != nil {
+		m["equalImmutableID"] = sr.equalImmutable.ID()
+	}
+	return m
+}
+
 func (sr *mutableRef) DescHandler(dgst digest.Digest) *DescHandler {
 	return sr.descHandlers[dgst]
 }
@@ -615,11 +662,11 @@ func layerToDistributable(mt string) string {
 	}
 
 	switch mt {
-	case ocispecs.MediaTypeImageLayerNonDistributable:
+	case ocispecs.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use.
 		return ocispecs.MediaTypeImageLayer
-	case ocispecs.MediaTypeImageLayerNonDistributableGzip:
+	case ocispecs.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use.
 		return ocispecs.MediaTypeImageLayerGzip
-	case ocispecs.MediaTypeImageLayerNonDistributableZstd:
+	case ocispecs.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use.
 		return ocispecs.MediaTypeImageLayerZstd
 	case images.MediaTypeDockerSchema2LayerForeign:
 		return images.MediaTypeDockerSchema2Layer
@@ -633,11 +680,11 @@ func layerToDistributable(mt string) string {
 func layerToNonDistributable(mt string) string {
 	switch mt {
 	case ocispecs.MediaTypeImageLayer:
-		return ocispecs.MediaTypeImageLayerNonDistributable
+		return ocispecs.MediaTypeImageLayerNonDistributable //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use.
 	case ocispecs.MediaTypeImageLayerGzip:
-		return ocispecs.MediaTypeImageLayerNonDistributableGzip
+		return ocispecs.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use.
 	case ocispecs.MediaTypeImageLayerZstd:
-		return ocispecs.MediaTypeImageLayerNonDistributableZstd
+		return ocispecs.MediaTypeImageLayerNonDistributableZstd //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use.
 	case images.MediaTypeDockerSchema2Layer:
 		return images.MediaTypeDockerSchema2LayerForeign
 	case images.MediaTypeDockerSchema2LayerForeignGzip:
@@ -993,7 +1040,7 @@ func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context,
 				info.Labels[k] = "" // Remove labels appended in this call
 			}
 			if _, err := r.cm.Snapshotter.Update(ctx, info, flds...); err != nil {
-				logrus.Warn(errors.Wrapf(err, "failed to remove tmp remote labels"))
+				bklog.G(ctx).Warn(errors.Wrapf(err, "failed to remove tmp remote labels"))
 			}
 		}()
 
@@ -1006,7 +1053,7 @@ func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context,
 }
 
 func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s session.Group) error {
-	_, err := sr.sizeG.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ interface{}, rerr error) {
+	_, err := g.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ struct{}, rerr error) {
 		dhs := sr.descHandlers
 		for _, r := range sr.layerChain() {
 			r := r
@@ -1018,7 +1065,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s
 			dh := dhs[digest.Digest(r.getBlob())]
 			if dh == nil {
 				// We cannot prepare remote snapshots without descHandler.
-				return nil, nil
+				return struct{}{}, nil
 			}
 
 			// tmpLabels contains dh.SnapshotLabels + session IDs. All keys contain
@@ -1055,7 +1102,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s
 								info.Labels[k] = ""
 							}
 							if _, err := r.cm.Snapshotter.Update(ctx, info, tmpFields...); err != nil {
-								logrus.Warn(errors.Wrapf(err,
+								bklog.G(ctx).Warn(errors.Wrapf(err,
 									"failed to remove tmp remote labels after prepare"))
 							}
 						}()
@@ -1070,7 +1117,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s
 			break
 		}
 
-		return nil, nil
+		return struct{}{}, nil
 	})
 	return err
 }
@@ -1093,18 +1140,18 @@ func makeTmpLabelsStargzMode(labels map[string]string, s session.Group) (fields
 }
 
 func (sr *immutableRef) unlazy(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool) error {
-	_, err := sr.sizeG.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ interface{}, rerr error) {
+	_, err := g.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ struct{}, rerr error) {
 		if _, err := sr.cm.Snapshotter.Stat(ctx, sr.getSnapshotID()); err == nil {
-			return nil, nil
+			return struct{}{}, nil
 		}
 
 		switch sr.kind() {
 		case Merge, Diff:
-			return nil, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel)
+			return struct{}{}, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel)
 		case Layer, BaseLayer:
-			return nil, sr.unlazyLayer(ctx, dhs, pg, s)
+			return struct{}{}, sr.unlazyLayer(ctx, dhs, pg, s)
 		}
-		return nil, nil
+		return struct{}{}, nil
 	})
 	return err
 }
@@ -1294,9 +1341,16 @@ func (sr *immutableRef) updateLastUsedNow() bool {
 	return true
 }
 
-func (sr *immutableRef) release(ctx context.Context) error {
-	delete(sr.refs, sr)
+func (sr *immutableRef) release(ctx context.Context) (rerr error) {
+	defer func() {
+		l := bklog.G(ctx).WithFields(sr.traceLogFields())
+		if rerr != nil {
+			l = l.WithError(rerr)
+		}
+		l.Trace("released cache ref")
+	}()
 
+	delete(sr.refs, sr)
 	if sr.updateLastUsedNow() {
 		sr.updateLastUsed()
 		if sr.equalMutable != nil {
@@ -1363,7 +1417,7 @@ func (cr *cacheRecord) finalize(ctx context.Context) error {
 		cr.cm.mu.Lock()
 		defer cr.cm.mu.Unlock()
 		if err := mutable.remove(context.TODO(), true); err != nil {
-			logrus.Error(err)
+			bklog.G(ctx).Error(err)
 		}
 	}()
 
@@ -1476,8 +1530,16 @@ func (sr *mutableRef) Release(ctx context.Context) error {
 	return sr.release(ctx)
 }
 
-func (sr *mutableRef) release(ctx context.Context) error {
+func (sr *mutableRef) release(ctx context.Context) (rerr error) {
+	defer func() {
+		l := bklog.G(ctx).WithFields(sr.traceLogFields())
+		if rerr != nil {
+			l = l.WithError(rerr)
+		}
+		l.Trace("released cache ref")
+	}()
 	delete(sr.refs, sr)
+
 	if !sr.HasCachePolicyRetain() {
 		if sr.equalImmutable != nil {
 			if sr.equalImmutable.HasCachePolicyRetain() {
@@ -1514,7 +1576,7 @@ func (m *readOnlyMounter) Mount() ([]mount.Mount, func() error, error) {
 		return nil, nil, err
 	}
 	for i, m := range mounts {
-		if m.Type == "overlay" {
+		if overlay.IsOverlayMountType(m) {
 			mounts[i].Options = readonlyOverlay(m.Options)
 			continue
 		}
@@ -1624,7 +1686,7 @@ func (sm *sharableMountable) Mount() (_ []mount.Mount, _ func() error, retErr er
 		}()
 		var isOverlay bool
 		for _, m := range mounts {
-			if m.Type == "overlay" {
+			if overlay.IsOverlayMountType(m) {
 				isOverlay = true
 				break
 			}

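refs.go also absorbs the generic flightcontrol API: `flightcontrol.Group` now carries a type parameter, so `Do` returns a typed value (`int64` for sizes, `struct{}` where only call deduplication matters) and the old `interface{}` returns plus `s.(int64)` assertions disappear. A hedged sketch against that API as vendored here:

```go
package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/util/flightcontrol"
)

func main() {
	// One group per result type; concurrent calls with the same key
	// share a single execution of the callback.
	var sizeG flightcontrol.Group[int64]

	size, err := sizeG.Do(context.Background(), "record-id", func(ctx context.Context) (int64, error) {
		return 42, nil // e.g. a snapshotter usage lookup
	})
	fmt.Println(size, err) // typed result: no s.(int64) on the caller side

	// Where there is no meaningful result, struct{} documents that fact.
	var unlazyG flightcontrol.Group[struct{}]
	_, _ = unlazyG.Do(context.Background(), "digest", func(ctx context.Context) (struct{}, error) {
		return struct{}{}, nil
	})
}
```

The `struct{}{}` returns scattered through the unlazy/Unlazy hunks above and below are exactly this second shape.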
+ 7 - 7
vendor/github.com/moby/buildkit/cache/remote.go

@@ -305,11 +305,11 @@ func (p lazyRefProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor)
 }
 
 func (p lazyRefProvider) Unlazy(ctx context.Context) error {
-	_, err := p.ref.cm.unlazyG.Do(ctx, string(p.desc.Digest), func(ctx context.Context) (_ interface{}, rerr error) {
+	_, err := p.ref.cm.unlazyG.Do(ctx, string(p.desc.Digest), func(ctx context.Context) (_ struct{}, rerr error) {
 		if isLazy, err := p.ref.isLazy(ctx); err != nil {
-			return nil, err
+			return struct{}{}, err
 		} else if !isLazy {
-			return nil, nil
+			return struct{}{}, nil
 		}
 		defer func() {
 			if rerr == nil {
@@ -320,7 +320,7 @@ func (p lazyRefProvider) Unlazy(ctx context.Context) error {
 		if p.dh == nil {
 			// shouldn't happen, if you have a lazy immutable ref it already should be validated
 			// that descriptor handlers exist for it
-			return nil, errors.New("unexpected nil descriptor handler")
+			return struct{}{}, errors.New("unexpected nil descriptor handler")
 		}
 
 		if p.dh.Progress != nil {
@@ -337,7 +337,7 @@ func (p lazyRefProvider) Unlazy(ctx context.Context) error {
 			Manager:  p.ref.cm.ContentStore,
 		}, p.desc, p.dh.Ref, logs.LoggerFromContext(ctx))
 		if err != nil {
-			return nil, err
+			return struct{}{}, err
 		}
 
 		if imageRefs := p.ref.getImageRefs(); len(imageRefs) > 0 {
@@ -345,12 +345,12 @@ func (p lazyRefProvider) Unlazy(ctx context.Context) error {
 			imageRef := imageRefs[0]
 			if p.ref.GetDescription() == "" {
 				if err := p.ref.SetDescription("pulled from " + imageRef); err != nil {
-					return nil, err
+					return struct{}{}, err
 				}
 			}
 		}
 
-		return nil, nil
+		return struct{}{}, nil
 	})
 	return err
 }

+ 136 - 31
vendor/github.com/moby/buildkit/cache/remotecache/export.go

@@ -16,7 +16,7 @@ import (
 	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/util/progress/logs"
 	digest "github.com/opencontainers/go-digest"
-	specs "github.com/opencontainers/image-spec/specs-go"
+	"github.com/opencontainers/image-spec/specs-go"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
@@ -37,24 +37,135 @@ type Config struct {
 	Compression compression.Config
 }
 
+type CacheType int
+
 const (
 	// ExportResponseManifestDesc is a key for the map returned from Exporter.Finalize.
 	// The map value is a JSON string of an OCI desciptor of a manifest.
 	ExporterResponseManifestDesc = "cache.manifest"
 )
 
-type contentCacheExporter struct {
-	solver.CacheExporterTarget
-	chains   *v1.CacheChains
-	ingester content.Ingester
-	oci      bool
-	ref      string
-	comp     compression.Config
+const (
+	NotSet CacheType = iota
+	ManifestList
+	ImageManifest
+)
+
+func (data CacheType) String() string {
+	switch data {
+	case ManifestList:
+		return "Manifest List"
+	case ImageManifest:
+		return "Image Manifest"
+	default:
+		return "Not Set"
+	}
 }
 
-func NewExporter(ingester content.Ingester, ref string, oci bool, compressionConfig compression.Config) Exporter {
+func NewExporter(ingester content.Ingester, ref string, oci bool, imageManifest bool, compressionConfig compression.Config) Exporter {
 	cc := v1.NewCacheChains()
-	return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, ref: ref, comp: compressionConfig}
+	return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, imageManifest: imageManifest, ref: ref, comp: compressionConfig}
+}
+
+type ExportableCache struct {
+	// This cache describes two distinct styles of exportable cache, one is an Index (or Manifest List) of blobs,
+	// or as an artifact using the OCI image manifest format.
+	ExportedManifest ocispecs.Manifest
+	ExportedIndex    ocispecs.Index
+	CacheType        CacheType
+	OCI              bool
+}
+
+func NewExportableCache(oci bool, imageManifest bool) (*ExportableCache, error) {
+	var mediaType string
+
+	if imageManifest {
+		mediaType = ocispecs.MediaTypeImageManifest
+		if !oci {
+			return nil, errors.Errorf("invalid configuration for remote cache")
+		}
+	} else {
+		if oci {
+			mediaType = ocispecs.MediaTypeImageIndex
+		} else {
+			mediaType = images.MediaTypeDockerSchema2ManifestList
+		}
+	}
+
+	cacheType := ManifestList
+	if imageManifest {
+		cacheType = ImageManifest
+	}
+
+	schemaVersion := specs.Versioned{SchemaVersion: 2}
+	switch cacheType {
+	case ManifestList:
+		return &ExportableCache{ExportedIndex: ocispecs.Index{
+			MediaType: mediaType,
+			Versioned: schemaVersion,
+		},
+			CacheType: cacheType,
+			OCI:       oci,
+		}, nil
+	case ImageManifest:
+		return &ExportableCache{ExportedManifest: ocispecs.Manifest{
+			MediaType: mediaType,
+			Versioned: schemaVersion,
+		},
+			CacheType: cacheType,
+			OCI:       oci,
+		}, nil
+	default:
+		return nil, errors.Errorf("exportable cache type not set")
+	}
+}
+
+func (ec *ExportableCache) MediaType() string {
+	if ec.CacheType == ManifestList {
+		return ec.ExportedIndex.MediaType
+	}
+	return ec.ExportedManifest.MediaType
+}
+
+func (ec *ExportableCache) AddCacheBlob(blob ocispecs.Descriptor) {
+	if ec.CacheType == ManifestList {
+		ec.ExportedIndex.Manifests = append(ec.ExportedIndex.Manifests, blob)
+	} else {
+		ec.ExportedManifest.Layers = append(ec.ExportedManifest.Layers, blob)
+	}
+}
+
+func (ec *ExportableCache) FinalizeCache(ctx context.Context) {
+	if ec.CacheType == ManifestList {
+		ec.ExportedIndex.Manifests = compression.ConvertAllLayerMediaTypes(ctx, ec.OCI, ec.ExportedIndex.Manifests...)
+	} else {
+		ec.ExportedManifest.Layers = compression.ConvertAllLayerMediaTypes(ctx, ec.OCI, ec.ExportedManifest.Layers...)
+	}
+}
+
+func (ec *ExportableCache) SetConfig(config ocispecs.Descriptor) {
+	if ec.CacheType == ManifestList {
+		ec.ExportedIndex.Manifests = append(ec.ExportedIndex.Manifests, config)
+	} else {
+		ec.ExportedManifest.Config = config
+	}
+}
+
+func (ec *ExportableCache) MarshalJSON() ([]byte, error) {
+	if ec.CacheType == ManifestList {
+		return json.Marshal(ec.ExportedIndex)
+	}
+	return json.Marshal(ec.ExportedManifest)
+}
+
+type contentCacheExporter struct {
+	solver.CacheExporterTarget
+	chains        *v1.CacheChains
+	ingester      content.Ingester
+	oci           bool
+	imageManifest bool
+	ref           string
+	comp          compression.Config
 }
 
 func (ce *contentCacheExporter) Name() string {
@@ -74,21 +185,9 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
 		return nil, err
 	}
 
-	// own type because oci type can't be pushed and docker type doesn't have annotations
-	type manifestList struct {
-		specs.Versioned
-
-		MediaType string `json:"mediaType,omitempty"`
-
-		// Manifests references platform specific manifests.
-		Manifests []ocispecs.Descriptor `json:"manifests"`
-	}
-
-	var mfst manifestList
-	mfst.SchemaVersion = 2
-	mfst.MediaType = images.MediaTypeDockerSchema2ManifestList
-	if ce.oci {
-		mfst.MediaType = ocispecs.MediaTypeImageIndex
+	cache, err := NewExportableCache(ce.oci, ce.imageManifest)
+	if err != nil {
+		return nil, err
 	}
 
 	for _, l := range config.Layers {
@@ -101,10 +200,10 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
 			return nil, layerDone(errors.Wrap(err, "error writing layer blob"))
 		}
 		layerDone(nil)
-		mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor)
+		cache.AddCacheBlob(dgstPair.Descriptor)
 	}
 
-	mfst.Manifests = compression.ConvertAllLayerMediaTypes(ce.oci, mfst.Manifests...)
+	cache.FinalizeCache(ctx)
 
 	dt, err := json.Marshal(config)
 	if err != nil {
@@ -122,9 +221,9 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
 	}
 	configDone(nil)
 
-	mfst.Manifests = append(mfst.Manifests, desc)
+	cache.SetConfig(desc)
 
-	dt, err = json.Marshal(mfst)
+	dt, err = cache.MarshalJSON()
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to marshal manifest")
 	}
@@ -133,9 +232,14 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
 	desc = ocispecs.Descriptor{
 		Digest:    dgst,
 		Size:      int64(len(dt)),
-		MediaType: mfst.MediaType,
+		MediaType: cache.MediaType(),
 	}
-	mfstDone := progress.OneOff(ctx, fmt.Sprintf("writing manifest %s", dgst))
+
+	mfstLog := fmt.Sprintf("writing cache manifest %s", dgst)
+	if ce.imageManifest {
+		mfstLog = fmt.Sprintf("writing cache image manifest %s", dgst)
+	}
+	mfstDone := progress.OneOff(ctx, mfstLog)
 	if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
 		return nil, mfstDone(errors.Wrap(err, "error writing manifest blob"))
 	}
@@ -145,5 +249,6 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
 	}
 	res[ExporterResponseManifestDesc] = string(descJSON)
 	mfstDone(nil)
+
 	return res, nil
 }

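The new `ExportableCache` hides the two wire layouts behind one API: the historical index/manifest-list of blobs, or, when `image-manifest` is requested, a single OCI image manifest whose layers are the cache blobs and whose `Config` is the cache config. A hedged sketch of how an exporter drives it, using placeholder descriptors rather than real blobs:

```go
package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/cache/remotecache"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// oci=true with imageManifest=true selects the OCI image-manifest
	// layout; imageManifest=false keeps the manifest-list layout.
	// (imageManifest=true with oci=false is rejected, per the diff.)
	ec, err := remotecache.NewExportableCache(true, true)
	if err != nil {
		panic(err)
	}

	layer := ocispecs.Descriptor{MediaType: ocispecs.MediaTypeImageLayerGzip, Size: 123}
	ec.AddCacheBlob(layer)              // appended to Layers (or Manifests for a list)
	ec.FinalizeCache(context.TODO())    // normalizes layer media types
	ec.SetConfig(ocispecs.Descriptor{}) // Config field here; appended entry for a list

	dt, err := ec.MarshalJSON()
	fmt.Println(string(dt), err, ec.MediaType())
}
```

The practical upside of the image-manifest form is that it is a plain OCI artifact, which some registries accept where a manifest list of non-image blobs is rejected.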
+ 3 - 3
vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go

@@ -15,6 +15,7 @@ import (
 	v1 "github.com/moby/buildkit/cache/remotecache/v1"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/compression"
 	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/util/tracing"
@@ -22,13 +23,12 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	actionscache "github.com/tonistiigi/go-actions-cache"
 	"golang.org/x/sync/errgroup"
 )
 
 func init() {
-	actionscache.Log = logrus.Debugf
+	actionscache.Log = bklog.L.Debugf
 }
 
 const (
@@ -92,7 +92,7 @@ func NewExporter(c *Config) (remotecache.Exporter, error) {
 }
 
 func (*exporter) Name() string {
-	return "exporting to GitHub cache"
+	return "exporting to GitHub Actions Cache"
 }
 
 func (ce *exporter) Config() remotecache.Config {

+ 42 - 12
vendor/github.com/moby/buildkit/cache/remotecache/import.go

@@ -3,6 +3,7 @@ package remotecache
 import (
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"sync"
 	"time"
@@ -12,12 +13,13 @@ import (
 	v1 "github.com/moby/buildkit/cache/remotecache/v1"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/imageutil"
+	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/worker"
 	digest "github.com/opencontainers/go-digest"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -47,24 +49,52 @@ func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispecs.Descr
 		return nil, err
 	}
 
-	var mfst ocispecs.Index
-	if err := json.Unmarshal(dt, &mfst); err != nil {
+	manifestType, err := imageutil.DetectManifestBlobMediaType(dt)
+	if err != nil {
 		return nil, err
 	}
 
-	allLayers := v1.DescriptorProvider{}
+	layerDone := progress.OneOff(ctx, fmt.Sprintf("inferred cache manifest type: %s", manifestType))
+	layerDone(nil)
 
+	allLayers := v1.DescriptorProvider{}
 	var configDesc ocispecs.Descriptor
 
-	for _, m := range mfst.Manifests {
-		if m.MediaType == v1.CacheConfigMediaTypeV0 {
-			configDesc = m
-			continue
+	switch manifestType {
+	case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
+		var mfst ocispecs.Index
+		if err := json.Unmarshal(dt, &mfst); err != nil {
+			return nil, err
 		}
-		allLayers[m.Digest] = v1.DescriptorProviderPair{
-			Descriptor: m,
-			Provider:   ci.provider,
+
+		for _, m := range mfst.Manifests {
+			if m.MediaType == v1.CacheConfigMediaTypeV0 {
+				configDesc = m
+				continue
+			}
+			allLayers[m.Digest] = v1.DescriptorProviderPair{
+				Descriptor: m,
+				Provider:   ci.provider,
+			}
 		}
+	case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest:
+		var mfst ocispecs.Manifest
+		if err := json.Unmarshal(dt, &mfst); err != nil {
+			return nil, err
+		}
+
+		if mfst.Config.MediaType == v1.CacheConfigMediaTypeV0 {
+			configDesc = mfst.Config
+		}
+		for _, m := range mfst.Layers {
+			allLayers[m.Digest] = v1.DescriptorProviderPair{
+				Descriptor: m,
+				Provider:   ci.provider,
+			}
+		}
+	default:
+		err = errors.Wrapf(err, "unsupported or uninferrable manifest type")
+		return nil, err
 	}
 
 	if dsls, ok := ci.provider.(DistributionSourceLabelSetter); ok {
@@ -162,7 +192,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte
 				}
 
 				if len(img.Rootfs.DiffIDs) != len(m.Layers) {
-					logrus.Warnf("invalid image with mismatching manifest and config")
+					bklog.G(ctx).Warnf("invalid image with mismatching manifest and config")
 					return nil
 				}
 

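The importer now accepts both layouts by sniffing the media type first (`imageutil.DetectManifestBlobMediaType`) and only then unmarshalling into `ocispecs.Index` or `ocispecs.Manifest`. A hedged, self-contained sketch of the same dispatch on raw bytes; the detector itself is assumed to be done elsewhere:

```go
package main

import (
	"encoding/json"
	"fmt"

	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// cacheLayers extracts layer descriptors from either cache layout;
// mediaType is assumed to come from a detector such as BuildKit's
// imageutil.DetectManifestBlobMediaType.
func cacheLayers(mediaType string, dt []byte) ([]ocispecs.Descriptor, error) {
	switch mediaType {
	case ocispecs.MediaTypeImageIndex, "application/vnd.docker.distribution.manifest.list.v2+json":
		var idx ocispecs.Index
		if err := json.Unmarshal(dt, &idx); err != nil {
			return nil, err
		}
		return idx.Manifests, nil // config entry filtered by media type in the real code
	case ocispecs.MediaTypeImageManifest, "application/vnd.docker.distribution.manifest.v2+json":
		var mfst ocispecs.Manifest
		if err := json.Unmarshal(dt, &mfst); err != nil {
			return nil, err
		}
		return mfst.Layers, nil // config travels separately in mfst.Config
	default:
		return nil, fmt.Errorf("unsupported manifest type %q", mediaType)
	}
}

func main() {
	layers, err := cacheLayers(ocispecs.MediaTypeImageManifest, []byte(`{"layers":[{"size":1}]}`))
	fmt.Println(len(layers), err)
}
```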
+ 2 - 2
vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go

@@ -8,10 +8,10 @@ import (
 	v1 "github.com/moby/buildkit/cache/remotecache/v1"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/compression"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc {
@@ -85,7 +85,7 @@ func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest)
 	}
 
 	if len(cfg.Layers) == 0 {
-		logrus.Warn("failed to match any cache with layers")
+		bklog.G(ctx).Warn("failed to match any cache with layers")
 		return nil, nil
 	}
 

+ 19 - 40
vendor/github.com/moby/buildkit/cache/remotecache/local/local.go

@@ -19,13 +19,19 @@ const (
 	attrDigest           = "digest"
 	attrSrc              = "src"
 	attrDest             = "dest"
+	attrImageManifest    = "image-manifest"
 	attrOCIMediatypes    = "oci-mediatypes"
 	contentStoreIDPrefix = "local:"
-	attrLayerCompression = "compression"
-	attrForceCompression = "force-compression"
-	attrCompressionLevel = "compression-level"
 )
 
+type exporter struct {
+	remotecache.Exporter
+}
+
+func (*exporter) Name() string {
+	return "exporting cache to client directory"
+}
+
 // ResolveCacheExporterFunc for "local" cache exporter.
 func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExporterFunc {
 	return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) {
@@ -33,7 +39,7 @@ func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExpor
 		if store == "" {
 			return nil, errors.New("local cache exporter requires dest")
 		}
-		compressionConfig, err := attrsToCompression(attrs)
+		compressionConfig, err := compression.ParseAttributes(attrs)
 		if err != nil {
 			return nil, err
 		}
@@ -45,12 +51,20 @@ func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExpor
 			}
 			ociMediatypes = b
 		}
+		imageManifest := false
+		if v, ok := attrs[attrImageManifest]; ok {
+			b, err := strconv.ParseBool(v)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to parse %s", attrImageManifest)
+			}
+			imageManifest = b
+		}
 		csID := contentStoreIDPrefix + store
 		cs, err := getContentStore(ctx, sm, g, csID)
 		if err != nil {
 			return nil, err
 		}
-		return remotecache.NewExporter(cs, "", ociMediatypes, *compressionConfig), nil
+		return &exporter{remotecache.NewExporter(cs, "", ociMediatypes, imageManifest, compressionConfig)}, nil
 	}
 }
 
@@ -109,38 +123,3 @@ type unlazyProvider struct {
 func (p *unlazyProvider) UnlazySession(desc ocispecs.Descriptor) session.Group {
 	return p.s
 }
-
-func attrsToCompression(attrs map[string]string) (*compression.Config, error) {
-	var compressionType compression.Type
-	if v, ok := attrs[attrLayerCompression]; ok {
-		c, err := compression.Parse(v)
-		if err != nil {
-			return nil, err
-		}
-		compressionType = c
-	} else {
-		compressionType = compression.Default
-	}
-	compressionConfig := compression.New(compressionType)
-	if v, ok := attrs[attrForceCompression]; ok {
-		var force bool
-		if v == "" {
-			force = true
-		} else {
-			b, err := strconv.ParseBool(v)
-			if err != nil {
-				return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, attrForceCompression)
-			}
-			force = b
-		}
-		compressionConfig = compressionConfig.SetForce(force)
-	}
-	if v, ok := attrs[attrCompressionLevel]; ok {
-		ii, err := strconv.ParseInt(v, 10, 64)
-		if err != nil {
-			return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, attrCompressionLevel)
-		}
-		compressionConfig = compressionConfig.SetLevel(int(ii))
-	}
-	return &compressionConfig, nil
-}

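Both the local and registry backends previously carried a private `attrsToCompression` copy; that duplication is gone in favor of `compression.ParseAttributes`, with only backend-specific keys such as `image-manifest` still parsed locally. A hedged sketch of the attribute flow, assuming `ParseAttributes` accepts the same `compression`/`force-compression`/`compression-level` keys the removed helpers handled and returns a `compression.Config` by value, as its call sites above imply:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/moby/buildkit/util/compression"
)

func main() {
	// Attributes as they would arrive from a cache export request.
	attrs := map[string]string{
		"compression":       "zstd",
		"compression-level": "3",
		"image-manifest":    "true",
	}

	// Shared parsing of the compression-related keys.
	cfg, err := compression.ParseAttributes(attrs)
	if err != nil {
		panic(err)
	}

	// Backend-local parsing, mirroring the exporter code above.
	imageManifest := false
	if v, ok := attrs["image-manifest"]; ok {
		if b, perr := strconv.ParseBool(v); perr == nil {
			imageManifest = b
		}
	}
	fmt.Printf("%+v image-manifest=%v\n", cfg, imageManifest)
}
```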
+ 68 - 49
vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go

@@ -15,34 +15,43 @@ import (
 	"github.com/moby/buildkit/util/estargz"
 	"github.com/moby/buildkit/util/push"
 	"github.com/moby/buildkit/util/resolver"
+	resolverconfig "github.com/moby/buildkit/util/resolver/config"
 	"github.com/moby/buildkit/util/resolver/limited"
 	digest "github.com/opencontainers/go-digest"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
 
-func canonicalizeRef(rawRef string) (string, error) {
+func canonicalizeRef(rawRef string) (reference.Named, error) {
 	if rawRef == "" {
-		return "", errors.New("missing ref")
+		return nil, errors.New("missing ref")
 	}
 	parsed, err := reference.ParseNormalizedNamed(rawRef)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
-	return reference.TagNameOnly(parsed).String(), nil
+	parsed = reference.TagNameOnly(parsed)
+	return parsed, nil
 }
 
 const (
-	attrRef              = "ref"
-	attrOCIMediatypes    = "oci-mediatypes"
-	attrLayerCompression = "compression"
-	attrForceCompression = "force-compression"
-	attrCompressionLevel = "compression-level"
+	attrRef           = "ref"
+	attrImageManifest = "image-manifest"
+	attrOCIMediatypes = "oci-mediatypes"
+	attrInsecure      = "registry.insecure"
 )
 
+type exporter struct {
+	remotecache.Exporter
+}
+
+func (*exporter) Name() string {
+	return "exporting cache to registry"
+}
+
 func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) remotecache.ResolveCacheExporterFunc {
 	return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) {
-		compressionConfig, err := attrsToCompression(attrs)
+		compressionConfig, err := compression.ParseAttributes(attrs)
 		if err != nil {
 			return nil, err
 		}
@@ -50,6 +59,7 @@ func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) r
 		if err != nil {
 			return nil, err
 		}
+		refString := ref.String()
 		ociMediatypes := true
 		if v, ok := attrs[attrOCIMediatypes]; ok {
 			b, err := strconv.ParseBool(v)
@@ -58,12 +68,30 @@ func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) r
 			}
 			ociMediatypes = b
 		}
-		remote := resolver.DefaultPool.GetResolver(hosts, ref, "push", sm, g)
-		pusher, err := push.Pusher(ctx, remote, ref)
+		imageManifest := false
+		if v, ok := attrs[attrImageManifest]; ok {
+			b, err := strconv.ParseBool(v)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to parse %s", attrImageManifest)
+			}
+			imageManifest = b
+		}
+		insecure := false
+		if v, ok := attrs[attrInsecure]; ok {
+			b, err := strconv.ParseBool(v)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to parse %s", attrInsecure)
+			}
+			insecure = b
+		}
+
+		scope, hosts := registryConfig(hosts, ref, "push", insecure)
+		remote := resolver.DefaultPool.GetResolver(hosts, refString, scope, sm, g)
+		pusher, err := push.Pusher(ctx, remote, refString)
 		if err != nil {
 			return nil, err
 		}
-		return remotecache.NewExporter(contentutil.FromPusher(pusher), ref, ociMediatypes, *compressionConfig), nil
+		return &exporter{remotecache.NewExporter(contentutil.FromPusher(pusher), refString, ociMediatypes, imageManifest, compressionConfig)}, nil
 	}
 }
 
@@ -73,8 +101,19 @@ func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docke
 		if err != nil {
 			return nil, ocispecs.Descriptor{}, err
 		}
-		remote := resolver.DefaultPool.GetResolver(hosts, ref, "pull", sm, g)
-		xref, desc, err := remote.Resolve(ctx, ref)
+		refString := ref.String()
+		insecure := false
+		if v, ok := attrs[attrInsecure]; ok {
+			b, err := strconv.ParseBool(v)
+			if err != nil {
+				return nil, ocispecs.Descriptor{}, errors.Wrapf(err, "failed to parse %s", attrInsecure)
+			}
+			insecure = b
+		}
+
+		scope, hosts := registryConfig(hosts, ref, "pull", insecure)
+		remote := resolver.DefaultPool.GetResolver(hosts, refString, scope, sm, g)
+		xref, desc, err := remote.Resolve(ctx, refString)
 		if err != nil {
 			return nil, ocispecs.Descriptor{}, err
 		}
@@ -83,8 +122,8 @@ func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docke
 			return nil, ocispecs.Descriptor{}, err
 		}
 		src := &withDistributionSourceLabel{
-			Provider: contentutil.FromFetcher(limited.Default.WrapFetcher(fetcher, ref)),
-			ref:      ref,
+			Provider: contentutil.FromFetcher(limited.Default.WrapFetcher(fetcher, refString)),
+			ref:      refString,
 			source:   cs,
 		}
 		return remotecache.NewImporter(src), desc, nil
@@ -130,37 +169,17 @@ func (dsl *withDistributionSourceLabel) SnapshotLabels(descs []ocispecs.Descript
 	return labels
 }
 
-func attrsToCompression(attrs map[string]string) (*compression.Config, error) {
-	var compressionType compression.Type
-	if v, ok := attrs[attrLayerCompression]; ok {
-		c, err := compression.Parse(v)
-		if err != nil {
-			return nil, err
-		}
-		compressionType = c
-	} else {
-		compressionType = compression.Default
-	}
-	compressionConfig := compression.New(compressionType)
-	if v, ok := attrs[attrForceCompression]; ok {
-		var force bool
-		if v == "" {
-			force = true
-		} else {
-			b, err := strconv.ParseBool(v)
-			if err != nil {
-				return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, attrForceCompression)
-			}
-			force = b
-		}
-		compressionConfig = compressionConfig.SetForce(force)
-	}
-	if v, ok := attrs[attrCompressionLevel]; ok {
-		ii, err := strconv.ParseInt(v, 10, 64)
-		if err != nil {
-			return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, attrCompressionLevel)
-		}
-		compressionConfig = compressionConfig.SetLevel(int(ii))
+func registryConfig(hosts docker.RegistryHosts, ref reference.Named, scope string, insecure bool) (string, docker.RegistryHosts) {
+	if insecure {
+		insecureTrue := true
+		httpTrue := true
+		hosts = resolver.NewRegistryConfig(map[string]resolverconfig.RegistryConfig{
+			reference.Domain(ref): {
+				Insecure:  &insecureTrue,
+				PlainHTTP: &httpTrue,
+			},
+		})
+		scope += ":insecure"
 	}
-	return &compressionConfig, nil
+	return scope, hosts
 }

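`registryConfig` is the piece that makes the new `registry.insecure=true` attribute work: it swaps in a one-off `RegistryHosts` for the ref's domain with `Insecure`/`PlainHTTP` forced on, and suffixes the auth scope with `:insecure`. A hedged sketch of the same construction, taking the domain as a plain string to sidestep ref parsing:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/remotes/docker"
	"github.com/moby/buildkit/util/resolver"
	resolverconfig "github.com/moby/buildkit/util/resolver/config"
)

// insecureHosts mirrors the shape of registryConfig above for a known
// domain; the real code derives the domain from a reference.Named.
func insecureHosts(domain, scope string) (string, docker.RegistryHosts) {
	insecureTrue := true
	httpTrue := true
	// Build an ad-hoc registry config that skips TLS verification and
	// allows plain HTTP for just this domain.
	hosts := resolver.NewRegistryConfig(map[string]resolverconfig.RegistryConfig{
		domain: {
			Insecure:  &insecureTrue,
			PlainHTTP: &httpTrue,
		},
	})
	return scope + ":insecure", hosts
}

func main() {
	scope, _ := insecureHosts("registry.local:5000", "pull")
	fmt.Println(scope) // pull:insecure
}
```

Note the `:insecure` scope suffix: it keeps resolver sessions for insecure and verified access to the same host from being shared.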
+ 1 - 1
vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go

@@ -291,7 +291,7 @@ func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheR
 	return nil, errors.WithStack(solver.ErrNotFound)
 }
 
-func (cs *cacheResultStorage) Exists(id string) bool {
+func (cs *cacheResultStorage) Exists(ctx context.Context, id string) bool {
 	return cs.byResultID(id) != nil
 }
 

+ 3 - 3
vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go

@@ -39,7 +39,7 @@ func (c *CacheChains) Visited(v interface{}) bool {
 	return ok
 }
 
-func (c *CacheChains) normalize() error {
+func (c *CacheChains) normalize(ctx context.Context) error {
 	st := &normalizeState{
 		added: map[*item]*item{},
 		links: map[*item]map[nlink]map[digest.Digest]struct{}{},
@@ -66,7 +66,7 @@ func (c *CacheChains) normalize() error {
 		}
 	}
 
-	st.removeLoops()
+	st.removeLoops(ctx)
 
 	items := make([]*item, 0, len(st.byKey))
 	for _, it := range st.byKey {
@@ -77,7 +77,7 @@ func (c *CacheChains) normalize() error {
 }
 
 func (c *CacheChains) Marshal(ctx context.Context) (*CacheConfig, DescriptorProvider, error) {
-	if err := c.normalize(); err != nil {
+	if err := c.normalize(ctx); err != nil {
 		return nil, nil, err
 	}
 

+ 6 - 6
vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go

@@ -6,10 +6,10 @@ import (
 	"sort"
 
 	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/bklog"
 	digest "github.com/opencontainers/go-digest"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 // sortConfig sorts the config structure to make sure it is deterministic
@@ -128,7 +128,7 @@ type normalizeState struct {
 	next  int
 }
 
-func (s *normalizeState) removeLoops() {
+func (s *normalizeState) removeLoops(ctx context.Context) {
 	roots := []digest.Digest{}
 	for dgst, it := range s.byKey {
 		if len(it.links) == 0 {
@@ -139,11 +139,11 @@ func (s *normalizeState) removeLoops() {
 	visited := map[digest.Digest]struct{}{}
 
 	for _, d := range roots {
-		s.checkLoops(d, visited)
+		s.checkLoops(ctx, d, visited)
 	}
 }
 
-func (s *normalizeState) checkLoops(d digest.Digest, visited map[digest.Digest]struct{}) {
+func (s *normalizeState) checkLoops(ctx context.Context, d digest.Digest, visited map[digest.Digest]struct{}) {
 	it, ok := s.byKey[d]
 	if !ok {
 		return
@@ -165,11 +165,11 @@ func (s *normalizeState) checkLoops(d digest.Digest, visited map[digest.Digest]s
 					continue
 				}
 				if !it2.removeLink(it) {
-					logrus.Warnf("failed to remove looping cache key %s %s", d, id)
+					bklog.G(ctx).Warnf("failed to remove looping cache key %s %s", d, id)
 				}
 				delete(links[l], id)
 			} else {
-				s.checkLoops(ctx, id, visited)
+				s.checkLoops(ctx, id, visited)
 			}
 		}
 	}

+ 18 - 14
vendor/github.com/moby/buildkit/cache/util/fsutil.go

@@ -57,21 +57,25 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([
 			return errors.WithStack(err)
 		}
 
-		if req.Range == nil {
-			dt, err = os.ReadFile(fp)
-			if err != nil {
-				return errors.WithStack(err)
-			}
-		} else {
-			f, err := os.Open(fp)
-			if err != nil {
-				return errors.WithStack(err)
-			}
-			dt, err = io.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)))
-			f.Close()
-			if err != nil {
-				return errors.WithStack(err)
+		f, err := os.Open(fp)
+		if err != nil {
+			// The filename here is internal to the mount, so we can restore
+			// the request base path for error reporting.
+			// See os.DirFS.Open for details.
+			if pe, ok := err.(*os.PathError); ok {
+				pe.Path = req.Filename
 			}
+			return errors.WithStack(err)
+		}
+		defer f.Close()
+
+		var rdr io.Reader = f
+		if req.Range != nil {
+			rdr = io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length))
+		}
+		dt, err = io.ReadAll(rdr)
+		if err != nil {
+			return errors.WithStack(err)
 		}
 		return nil
 	})

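The rewritten ReadFile collapses the whole-file and ranged branches into one reader pipeline: open once, optionally wrap in an `io.NewSectionReader`, then `io.ReadAll`, with a single deferred close. A minimal standalone sketch of that shape (path and range values are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// readRange reads all of path, or only [offset, offset+length) when
// length > 0, the way the unified fsutil branch does.
func readRange(path string, offset, length int64) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close() // one close path instead of per-branch handling

	var rdr io.Reader = f
	if length > 0 {
		// SectionReader restricts reads to the requested byte window.
		rdr = io.NewSectionReader(f, offset, length)
	}
	return io.ReadAll(rdr)
}

func main() {
	dt, err := readRange("/etc/hostname", 0, 0)
	fmt.Println(len(dt), err)
}
```

The `os.PathError` rewrite in the hunk above is a separate nicety: since the opened path is internal to the mount, the error is patched to report the caller's requested filename instead.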
+ 147 - 47
vendor/github.com/moby/buildkit/client/client.go

@@ -11,7 +11,6 @@ import (
 
 	contentapi "github.com/containerd/containerd/api/services/content/v1"
 	"github.com/containerd/containerd/defaults"
-	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	controlapi "github.com/moby/buildkit/api/services/control"
 	"github.com/moby/buildkit/client/connhelper"
 	"github.com/moby/buildkit/session"
@@ -26,6 +25,7 @@ import (
 	sdktrace "go.opentelemetry.io/otel/sdk/trace"
 	"go.opentelemetry.io/otel/trace"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/credentials/insecure"
 )
@@ -35,7 +35,9 @@ type Client struct {
 	sessionDialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
 }
 
-type ClientOpt interface{}
+type ClientOpt interface {
+	isClientOpt()
+}
 
 // New returns a new buildkit client. Address can be empty for the system-default address.
 func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) {
@@ -44,8 +46,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
 	}
 	needDialer := true
-	needWithInsecure := true
-	tlsServerName := ""
 
 	var unary []grpc.UnaryClientInterceptor
 	var stream []grpc.StreamClientInterceptor
@@ -54,19 +54,18 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 	var tracerProvider trace.TracerProvider
 	var tracerDelegate TracerDelegate
 	var sessionDialer func(context.Context, string, map[string][]string) (net.Conn, error)
+	var customDialOptions []grpc.DialOption
+	var creds *withCredentials
 
 	for _, o := range opts {
 		if _, ok := o.(*withFailFast); ok {
 			gopts = append(gopts, grpc.FailOnNonTempDialError(true))
 		}
 		if credInfo, ok := o.(*withCredentials); ok {
-			opt, err := loadCredentials(credInfo)
-			if err != nil {
-				return nil, err
+			if creds == nil {
+				creds = &withCredentials{}
 			}
-			gopts = append(gopts, opt)
-			needWithInsecure = false
-			tlsServerName = credInfo.ServerName
+			creds = creds.merge(credInfo)
 		}
 		if wt, ok := o.(*withTracer); ok {
 			customTracer = true
@@ -82,6 +81,19 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 		if sd, ok := o.(*withSessionDialer); ok {
 			sessionDialer = sd.dialer
 		}
+		if opt, ok := o.(*withGRPCDialOption); ok {
+			customDialOptions = append(customDialOptions, opt.opt)
+		}
+	}
+
+	if creds == nil {
+		gopts = append(gopts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		credOpts, err := loadCredentials(creds)
+		if err != nil {
+			return nil, err
+		}
+		gopts = append(gopts, credOpts)
 	}
 
 	if !customTracer {
@@ -103,9 +115,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 		}
 		gopts = append(gopts, grpc.WithContextDialer(dialFn))
 	}
-	if needWithInsecure {
-		gopts = append(gopts, grpc.WithTransportCredentials(insecure.NewCredentials()))
-	}
 	if address == "" {
 		address = appdefaults.Address
 	}
@@ -117,7 +126,10 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 	//   ref: https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.3
 	// - However, when TLS specified, grpc-go requires it must match
 	//   with its servername specified for certificate validation.
-	authority := tlsServerName
+	var authority string
+	if creds != nil && creds.serverName != "" {
+		authority = creds.serverName
+	}
 	if authority == "" {
 		// authority as hostname from target address
 		uri, err := url.Parse(address)
@@ -131,17 +143,9 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
 	unary = append(unary, grpcerrors.UnaryClientInterceptor)
 	stream = append(stream, grpcerrors.StreamClientInterceptor)
 
-	if len(unary) == 1 {
-		gopts = append(gopts, grpc.WithUnaryInterceptor(unary[0]))
-	} else if len(unary) > 1 {
-		gopts = append(gopts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...)))
-	}
-
-	if len(stream) == 1 {
-		gopts = append(gopts, grpc.WithStreamInterceptor(stream[0]))
-	} else if len(stream) > 1 {
-		gopts = append(gopts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...)))
-	}
+	gopts = append(gopts, grpc.WithChainUnaryInterceptor(unary...))
+	gopts = append(gopts, grpc.WithChainStreamInterceptor(stream...))
+	gopts = append(gopts, customDialOptions...)
 
 	conn, err := grpc.DialContext(ctx, address, gopts...)
 	if err != nil {
@@ -181,12 +185,27 @@ func (c *Client) Dialer() session.Dialer {
 	return grpchijack.Dialer(c.ControlClient())
 }
 
+func (c *Client) Wait(ctx context.Context) error {
+	opts := []grpc.CallOption{grpc.WaitForReady(true)}
+	_, err := c.ControlClient().Info(ctx, &controlapi.InfoRequest{}, opts...)
+	if err != nil {
+		if code := grpcerrors.Code(err); code == codes.Unimplemented {
+			// only buildkit v0.11+ supports the info api, but an unimplemented
+			// response error is still a response so we can ignore it
+			return nil
+		}
+	}
+	return err
+}
+
 func (c *Client) Close() error {
 	return c.conn.Close()
 }
 
 type withFailFast struct{}
 
+func (*withFailFast) isClientOpt() {}
+
 func WithFailFast() ClientOpt {
 	return &withFailFast{}
 }
@@ -195,50 +214,115 @@ type withDialer struct {
 	dialer func(context.Context, string) (net.Conn, error)
 }
 
+func (*withDialer) isClientOpt() {}
+
 func WithContextDialer(df func(context.Context, string) (net.Conn, error)) ClientOpt {
 	return &withDialer{dialer: df}
 }
 
 type withCredentials struct {
-	ServerName string
-	CACert     string
-	Cert       string
-	Key        string
+	// server options
+	serverName   string
+	caCert       string
+	caCertSystem bool
+
+	// client options
+	cert string
+	key  string
 }
 }
 
 
+func (opts *withCredentials) merge(opts2 *withCredentials) *withCredentials {
+	result := *opts
+	if opts2 == nil {
+		return &result
+	}
+
+	// server options
+	if opts2.serverName != "" {
+		result.serverName = opts2.serverName
+	}
+	if opts2.caCert != "" {
+		result.caCert = opts2.caCert
+	}
+	if opts2.caCertSystem {
+		result.caCertSystem = opts2.caCertSystem
+	}
+
+	// client options
+	if opts2.cert != "" {
+		result.cert = opts2.cert
+	}
+	if opts2.key != "" {
+		result.key = opts2.key
+	}
+
+	return &result
+}
+
+func (*withCredentials) isClientOpt() {}
+
 // WithCredentials configures the TLS parameters of the client.
 // WithCredentials configures the TLS parameters of the client.
 // Arguments:
 // Arguments:
-// * serverName: specifies the name of the target server
-// * ca:				 specifies the filepath of the CA certificate to use for verification
-// * cert:			 specifies the filepath of the client certificate
-// * key:				 specifies the filepath of the client key
-func WithCredentials(serverName, ca, cert, key string) ClientOpt {
-	return &withCredentials{serverName, ca, cert, key}
+// * cert:	specifies the filepath of the client certificate
+// * key:	specifies the filepath of the client key
+func WithCredentials(cert, key string) ClientOpt {
+	return &withCredentials{
+		cert: cert,
+		key:  key,
+	}
+}
+
+// WithServerConfig configures the TLS parameters to connect to the server.
+// Arguments:
+// * serverName:	specifies the server name to verify the hostname
+// * caCert:		specifies the filepath of the CA certificate
+func WithServerConfig(serverName, caCert string) ClientOpt {
+	return &withCredentials{
+		serverName: serverName,
+		caCert:     caCert,
+	}
+}
+
+// WithServerConfigSystem configures the TLS parameters to connect to the
+// server, using the system's certificate pool.
+func WithServerConfigSystem(serverName string) ClientOpt {
+	return &withCredentials{
+		serverName:   serverName,
+		caCertSystem: true,
+	}
 }
 }
 
 
 func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
 func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
-	ca, err := os.ReadFile(opts.CACert)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not read ca certificate")
+	cfg := &tls.Config{}
+
+	if opts.caCertSystem {
+		cfg.RootCAs, _ = x509.SystemCertPool()
+	}
+	if cfg.RootCAs == nil {
+		cfg.RootCAs = x509.NewCertPool()
 	}
 	}
 
 
-	certPool := x509.NewCertPool()
-	if ok := certPool.AppendCertsFromPEM(ca); !ok {
-		return nil, errors.New("failed to append ca certs")
+	if opts.caCert != "" {
+		ca, err := os.ReadFile(opts.caCert)
+		if err != nil {
+			return nil, errors.Wrap(err, "could not read ca certificate")
+		}
+		if ok := cfg.RootCAs.AppendCertsFromPEM(ca); !ok {
+			return nil, errors.New("failed to append ca certs")
+		}
 	}
 	}
 
 
-	cfg := &tls.Config{
-		ServerName: opts.ServerName,
-		RootCAs:    certPool,
+	if opts.serverName != "" {
+		cfg.ServerName = opts.serverName
 	}
 	}
 
 
 	// we will produce an error if the user forgot about either cert or key if at least one is specified
 	// we will produce an error if the user forgot about either cert or key if at least one is specified
-	if opts.Cert != "" || opts.Key != "" {
-		cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key)
+	if opts.cert != "" || opts.key != "" {
+		cert, err := tls.LoadX509KeyPair(opts.cert, opts.key)
 		if err != nil {
 		if err != nil {
 			return nil, errors.Wrap(err, "could not read certificate/key")
 			return nil, errors.Wrap(err, "could not read certificate/key")
 		}
 		}
-		cfg.Certificates = []tls.Certificate{cert}
+		cfg.Certificates = append(cfg.Certificates, cert)
 	}
 	}
 
 
 	return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
 	return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
@@ -252,6 +336,8 @@ type withTracer struct {
 	tp trace.TracerProvider
 	tp trace.TracerProvider
 }
 }
 
 
+func (w *withTracer) isClientOpt() {}
+
 type TracerDelegate interface {
 type TracerDelegate interface {
 	SetSpanExporter(context.Context, sdktrace.SpanExporter) error
 	SetSpanExporter(context.Context, sdktrace.SpanExporter) error
 }
 }
@@ -266,6 +352,8 @@ type withTracerDelegate struct {
 	TracerDelegate
 	TracerDelegate
 }
 }
 
 
+func (w *withTracerDelegate) isClientOpt() {}
+
 func WithSessionDialer(dialer func(context.Context, string, map[string][]string) (net.Conn, error)) ClientOpt {
 func WithSessionDialer(dialer func(context.Context, string, map[string][]string) (net.Conn, error)) ClientOpt {
 	return &withSessionDialer{dialer}
 	return &withSessionDialer{dialer}
 }
 }
@@ -274,6 +362,8 @@ type withSessionDialer struct {
 	dialer func(context.Context, string, map[string][]string) (net.Conn, error)
 	dialer func(context.Context, string, map[string][]string) (net.Conn, error)
 }
 }
 
 
+func (w *withSessionDialer) isClientOpt() {}
+
 func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) {
 func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) {
 	ch, err := connhelper.GetConnectionHelper(address)
 	ch, err := connhelper.GetConnectionHelper(address)
 	if err != nil {
 	if err != nil {
@@ -294,3 +384,13 @@ func filterInterceptor(intercept grpc.UnaryClientInterceptor) grpc.UnaryClientIn
 		return intercept(ctx, method, req, reply, cc, invoker, opts...)
 		return intercept(ctx, method, req, reply, cc, invoker, opts...)
 	}
 	}
 }
 }
+
+type withGRPCDialOption struct {
+	opt grpc.DialOption
+}
+
+func (*withGRPCDialOption) isClientOpt() {}
+
+func WithGRPCDialOption(opt grpc.DialOption) ClientOpt {
+	return &withGRPCDialOption{opt}
+}

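The reworked options above split server trust (CA, server name) from client identity (cert/key) and let callers inject raw gRPC dial options. A minimal sketch of the new surface, assuming a hypothetical daemon address and certificate paths:

	package main

	import (
		"context"
		"log"

		"github.com/moby/buildkit/client"
		"google.golang.org/grpc"
	)

	func main() {
		ctx := context.Background()
		c, err := client.New(ctx, "tcp://buildkitd.example.com:1234",
			// server options: verify the daemon against the system CA pool
			client.WithServerConfigSystem("buildkitd.example.com"),
			// client options: present a client certificate (mTLS)
			client.WithCredentials("/certs/client.pem", "/certs/client-key.pem"),
			// arbitrary gRPC options now pass straight through
			client.WithGRPCDialOption(grpc.WithUserAgent("my-tool/1.0")),
		)
		if err != nil {
			log.Fatal(err)
		}
		defer c.Close()

		// Wait blocks until the daemon responds; Unimplemented from a
		// pre-v0.11 daemon is treated as success.
		if err := c.Wait(ctx); err != nil {
			log.Fatal(err)
		}
	}
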
+ 2 - 2
vendor/github.com/moby/buildkit/client/llb/async.go

@@ -15,7 +15,7 @@ type asyncState struct {
 	target State
 	set    bool
 	err    error
-	g      flightcontrol.Group
+	g      flightcontrol.Group[State]
 }
 
 func (as *asyncState) Output() Output {
@@ -53,7 +53,7 @@ func (as *asyncState) ToInput(ctx context.Context, c *Constraints) (*pb.Input, e
 }
 
 func (as *asyncState) Do(ctx context.Context, c *Constraints) error {
-	_, err := as.g.Do(ctx, "", func(ctx context.Context) (interface{}, error) {
+	_, err := as.g.Do(ctx, "", func(ctx context.Context) (State, error) {
 		if as.set {
 			return as.target, as.err
 		}

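flightcontrol.Group is now generic, so the deduplicated call returns a typed value with no interface{} assertion on the way out. A rough sketch of the pattern with an invented result type:

	import (
		"context"

		"github.com/moby/buildkit/util/flightcontrol"
	)

	type result struct{ data string }

	var g flightcontrol.Group[result]

	func load(ctx context.Context, key string) (result, error) {
		// concurrent callers with the same key share a single execution
		return g.Do(ctx, key, func(ctx context.Context) (result, error) {
			return result{data: "expensive computation for " + key}, nil
		})
	}
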
+ 18 - 6
vendor/github.com/moby/buildkit/client/llb/definition.go

@@ -24,7 +24,7 @@ type DefinitionOp struct {
 	platforms  map[digest.Digest]*ocispecs.Platform
 	dgst       digest.Digest
 	index      pb.OutputIndex
-	inputCache map[digest.Digest][]*DefinitionOp
+	inputCache *sync.Map // shared and written among DefinitionOps so avoid race on this map using sync.Map
 }
 
 // NewDefinitionOp returns a new operation from a marshalled definition.
@@ -70,7 +70,7 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) {
 				state := NewState(op)
 				st = &state
 			}
-			sourceMaps[i] = NewSourceMap(st, info.Filename, info.Data)
+			sourceMaps[i] = NewSourceMap(st, info.Filename, info.Language, info.Data)
 		}
 
 		for dgst, locs := range def.Source.Locations {
@@ -101,7 +101,7 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) {
 		platforms:  platforms,
 		dgst:       dgst,
 		index:      index,
-		inputCache: make(map[digest.Digest][]*DefinitionOp),
+		inputCache: new(sync.Map),
 	}, nil
 }
 
@@ -180,6 +180,18 @@ func (d *DefinitionOp) Output() Output {
 	}}
 }
 
+func (d *DefinitionOp) loadInputCache(dgst digest.Digest) ([]*DefinitionOp, bool) {
+	a, ok := d.inputCache.Load(dgst.String())
+	if ok {
+		return a.([]*DefinitionOp), true
+	}
+	return nil, false
+}
+
+func (d *DefinitionOp) storeInputCache(dgst digest.Digest, c []*DefinitionOp) {
+	d.inputCache.Store(dgst.String(), c)
+}
+
 func (d *DefinitionOp) Inputs() []Output {
 	if d.dgst == "" {
 		return nil
@@ -195,7 +207,7 @@ func (d *DefinitionOp) Inputs() []Output {
 	for _, input := range op.Inputs {
 		var vtx *DefinitionOp
 		d.mu.Lock()
-		if existingIndexes, ok := d.inputCache[input.Digest]; ok {
+		if existingIndexes, ok := d.loadInputCache(input.Digest); ok {
 			if int(input.Index) < len(existingIndexes) && existingIndexes[input.Index] != nil {
 				vtx = existingIndexes[input.Index]
 			}
@@ -211,14 +223,14 @@ func (d *DefinitionOp) Inputs() []Output {
 				inputCache: d.inputCache,
 				sources:    d.sources,
 			}
-			existingIndexes := d.inputCache[input.Digest]
+			existingIndexes, _ := d.loadInputCache(input.Digest)
 			indexDiff := int(input.Index) - len(existingIndexes)
 			if indexDiff >= 0 {
 				// make room in the slice for the new index being set
 				existingIndexes = append(existingIndexes, make([]*DefinitionOp, indexDiff+1)...)
 			}
 			existingIndexes[input.Index] = vtx
-			d.inputCache[input.Digest] = existingIndexes
+			d.storeInputCache(input.Digest, existingIndexes)
 		}
 		d.mu.Unlock()
 

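The switch to sync.Map avoids racing on the input cache when DefinitionOps that share it resolve Inputs() concurrently; the string key and single type assertion mirror the helpers above. A standalone sketch of the same load/assert/store pattern, with a hypothetical node type:

	import (
		"sync"

		digest "github.com/opencontainers/go-digest"
	)

	type node struct{ name string }

	func lookup(cache *sync.Map, dgst digest.Digest) ([]*node, bool) {
		// Load returns an untyped value, so the assertion happens in one place
		if v, ok := cache.Load(dgst.String()); ok {
			return v.([]*node), true
		}
		return nil, false
	}

	func store(cache *sync.Map, dgst digest.Digest, nodes []*node) {
		cache.Store(dgst.String(), nodes)
	}
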
+ 3 - 1
vendor/github.com/moby/buildkit/client/llb/diff.go

@@ -90,6 +90,8 @@ func (m *DiffOp) Inputs() (out []Output) {
 	return out
 }
 
+// Diff returns a state that represents the diff of the lower and upper states.
+// The returned State is useful for use with [Merge] where you can merge the lower state with the diff.
 func Diff(lower, upper State, opts ...ConstraintsOpt) State {
 	if lower.Output() == nil {
 		if upper.Output() == nil {
@@ -104,5 +106,5 @@ func Diff(lower, upper State, opts ...ConstraintsOpt) State {
 	for _, o := range opts {
 		o.SetConstraintsOption(&c)
 	}
-	return NewState(NewDiff(lower, upper, c).Output())
+	return lower.WithOutput(NewDiff(lower, upper, c).Output())
 }

+ 6 - 1
vendor/github.com/moby/buildkit/client/llb/exec.go

@@ -339,7 +339,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
 			inputIndex = pb.Empty
 		}
 
-		outputIndex := pb.OutputIndex(-1)
+		outputIndex := pb.SkipOutput
 		if !m.noOutput && !m.readonly && m.cacheID == "" && !m.tmpfs {
 			outputIndex = pb.OutputIndex(outIndex)
 			outIndex++
@@ -649,6 +649,7 @@ type SSHInfo struct {
 	Optional bool
 }
 
+// AddSecret is a RunOption that adds a secret to the exec.
 func AddSecret(dest string, opts ...SecretOption) RunOption {
 	return runOptionFunc(func(ei *ExecInfo) {
 		s := &SecretInfo{ID: dest, Target: dest, Mode: 0400}
@@ -696,6 +697,7 @@ func SecretAsEnv(v bool) SecretOption {
 	})
 }
 
+// SecretFileOpt sets the secret's target file uid, gid and permissions.
 func SecretFileOpt(uid, gid, mode int) SecretOption {
 	return secretOptionFunc(func(si *SecretInfo) {
 		si.UID = uid
@@ -704,12 +706,15 @@ func SecretFileOpt(uid, gid, mode int) SecretOption {
 	})
 }
 
+// ReadonlyRootFS sets the exec's root filesystem to be read-only.
 func ReadonlyRootFS() RunOption {
 	return runOptionFunc(func(ei *ExecInfo) {
 		ei.ReadonlyRootFS = true
 	})
 }
 
+// WithProxy is a RunOption that sets the proxy environment variables in the resulting exec.
+// For example `HTTP_PROXY` is a standard environment variable for unix systems that programs may read.
 func WithProxy(ps ProxyEnv) RunOption {
 	return runOptionFunc(func(ei *ExecInfo) {
 		ei.ProxyEnv = &ps

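For illustration, the documented RunOptions compose like this; the secret ID, proxy address, and command are placeholders:

	import "github.com/moby/buildkit/client/llb"

	func runWithOptions(base llb.State) llb.State {
		return base.Run(
			llb.Shlex("sh -c 'curl -s https://example.com > /out.txt'"),
			// mount a secret file at /run/secrets/token, readable only by root
			llb.AddSecret("/run/secrets/token", llb.SecretID("token"), llb.SecretFileOpt(0, 0, 0400)),
			// route traffic through a proxy via the standard environment variables
			llb.WithProxy(llb.ProxyEnv{HTTPProxy: "http://proxy:3128", HTTPSProxy: "http://proxy:3128"}),
			// fail loudly if the command tries to write outside its mounts
			llb.ReadonlyRootFS(),
		).Root()
	}
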
+ 48 - 13
vendor/github.com/moby/buildkit/client/llb/fileop.go

@@ -48,6 +48,7 @@ func NewFileOp(s State, action *FileAction, c Constraints) *FileOp {
 }
 
 // CopyInput is either llb.State or *FileActionWithState
+// It is used by [Copy] to specify the source of the copy operation.
 type CopyInput interface {
 	isFileOpCopyInput()
 }
@@ -60,6 +61,10 @@ type capAdder interface {
 	addCaps(*FileOp)
 }
 
+// FileAction is used to specify a file operation on a [State].
+// It can be used to create a directory, create a file, or remove a file, etc.
+// This is used by [State.File]
+// Typically a FileAction is created by calling one of the helper functions such as [Mkdir], [Copy], [Rm], [Mkfile]
 type FileAction struct {
 	state  *State
 	prev   *FileAction
@@ -131,11 +136,16 @@ type fileActionWithState struct {
 
 func (fas *fileActionWithState) isFileOpCopyInput() {}
 
+// Mkdir creates a FileAction which creates a directory at the given path.
+// Example:
+//
+//	llb.Scratch().File(llb.Mkdir("/foo", 0755))
 func Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction {
 	var mi MkdirInfo
 	for _, o := range opt {
 		o.SetMkdirOption(&mi)
 	}
+
 	return &FileAction{
 		action: &fileActionMkdir{
 			file: p,
@@ -181,6 +191,7 @@ func (fn mkdirOptionFunc) SetMkdirOption(mi *MkdirInfo) {
 
 var _ MkdirOption = &MkdirInfo{}
 
+// WithParents is an option for Mkdir which creates parent directories if they do not exist.
 func WithParents(b bool) MkdirOption {
 	return mkdirOptionFunc(func(mi *MkdirInfo) {
 		mi.MakeParents = b
@@ -282,6 +293,10 @@ func (up *UserOpt) marshal(base pb.InputIndex) *pb.UserOpt {
 	return &pb.UserOpt{User: &pb.UserOpt_ByID{ByID: uint32(up.UID)}}
 }
 
+// Mkfile creates a FileAction which creates a file at the given path with the provided contents.
+// Example:
+//
+//	llb.Scratch().File(llb.Mkfile("/foo", 0644, []byte("hello world!")))
 func Mkfile(p string, m os.FileMode, dt []byte, opts ...MkfileOption) *FileAction {
 	var mi MkfileInfo
 	for _, o := range opts {
@@ -332,6 +347,10 @@ func (a *fileActionMkfile) toProtoAction(ctx context.Context, parent string, bas
 	}, nil
 }
 
+// Rm creates a FileAction which removes a file or directory at the given path.
+// Example:
+//
+//	llb.Scratch().File(Mkfile("/foo", 0644, []byte("not around for long..."))).File(llb.Rm("/foo"))
 func Rm(p string, opts ...RmOption) *FileAction {
 	var mi RmInfo
 	for _, o := range opts {
@@ -394,6 +413,25 @@ func (a *fileActionRm) toProtoAction(ctx context.Context, parent string, base pb
 	}, nil
 }
 
+// Copy produces a FileAction which copies a file or directory from the source to the destination.
+// The "input" parameter is the contents to copy from.
+// "src" is the path to copy from within the "input".
+// "dest" is the path to copy to within the destination (the state being operated on).
+// See [CopyInput] for the valid types of input.
+//
+// Example:
+//
+//	st := llb.Local(".")
+//	llb.Scratch().File(llb.Copy(st, "/foo", "/bar"))
+//
+// The example copies the local (client) directory "./foo" to a new empty directory at /bar.
+//
+// Note: Copying directories can have different behavior based on whether the destination exists or not.
+// When the destination already exists, the contents of the source directory are copied underneath the destination, including the directory itself.
+// You may need to supply a copy option to copy the dir contents only.
+// You may also need to pass in a [CopyOption] which creates parent directories if they do not exist.
+//
+// See [CopyOption] for more details on what options are available.
 func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction {
 	var state *State
 	var fas *fileActionWithState
@@ -410,7 +448,6 @@ func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction {
 	for _, o := range opts {
 		o.SetCopyOption(&mi)
 	}
-
 	return &FileAction{
 		action: &fileActionCopy{
 			state: state,
@@ -486,22 +523,19 @@ func (a *fileActionCopy) toProtoAction(ctx context.Context, parent string, base
 
 func (a *fileActionCopy) sourcePath(ctx context.Context) (string, error) {
 	p := path.Clean(a.src)
+	dir := "/"
+	var err error
 	if !path.IsAbs(p) {
 		if a.state != nil {
-			dir, err := a.state.GetDir(ctx)
-			if err != nil {
-				return "", err
-			}
-			p = path.Join("/", dir, p)
+			dir, err = a.state.GetDir(ctx)
 		} else if a.fas != nil {
-			dir, err := a.fas.state.GetDir(ctx)
-			if err != nil {
-				return "", err
-			}
-			p = path.Join("/", dir, p)
+			dir, err = a.fas.state.GetDir(ctx)
+		}
+		if err != nil {
+			return "", err
 		}
 	}
-	return p, nil
+	return path.Join(dir, p), nil
 }
 
 func (a *fileActionCopy) addCaps(f *FileOp) {
@@ -691,6 +725,7 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
 	}
 
 	pop, md := MarshalConstraints(c, &f.constraints)
+	pop.Platform = nil // file op is not platform specific
 	pop.Op = &pb.Op_File{
 		File: pfo,
 	}
@@ -702,7 +737,7 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
 	pop.Inputs = state.inputs
 
 	for i, st := range state.actions {
-		output := pb.OutputIndex(-1)
+		output := pb.SkipOutput
 		if i+1 == len(state.actions) {
 			output = 0
 		}

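The helpers above also chain as methods on *FileAction, so several operations can run inside a single file op. A short sketch combining them (paths and contents invented):

	import "github.com/moby/buildkit/client/llb"

	func assembleEtc(local llb.State) llb.State {
		return llb.Scratch().File(
			llb.Mkdir("/etc/app", 0755, llb.WithParents(true)).
				Mkfile("/etc/app/config.json", 0644, []byte(`{"debug":false}`)).
				Copy(local, "certs", "/etc/app/certs").
				Rm("/etc/app/certs/README.md"),
		)
	}
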
+ 7 - 7
vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go

@@ -45,7 +45,6 @@ func New(with ...ImageMetaResolverOpt) llb.ImageMetaResolver {
 	headers.Set("User-Agent", version.UserAgent())
 	return &imageMetaResolver{
 		resolver: docker.NewResolver(docker.ResolverOptions{
-			Client:  http.DefaultClient,
 			Headers: headers,
 		}),
 		platform: opts.platform,
@@ -71,11 +70,12 @@ type imageMetaResolver struct {
 }
 
 type resolveResult struct {
+	ref    string
 	config []byte
 	dgst   digest.Digest
 }
 
-func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) {
+func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) {
 	imr.locker.Lock(ref)
 	defer imr.locker.Unlock(ref)
 
@@ -87,16 +87,16 @@ func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string
 	k := imr.key(ref, platform)
 
 	if res, ok := imr.cache[k]; ok {
-		return res.dgst, res.config, nil
+		return res.ref, res.dgst, res.config, nil
 	}
 
-	dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform)
+	ref, dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform, opt.SourcePolicies)
 	if err != nil {
-		return "", nil, err
+		return "", "", nil, err
 	}
 
-	imr.cache[k] = resolveResult{dgst: dgst, config: config}
-	return dgst, config, nil
+	imr.cache[k] = resolveResult{dgst: dgst, config: config, ref: ref}
+	return ref, dgst, config, nil
 }
 
 func (imr *imageMetaResolver) key(ref string, platform *ocispecs.Platform) string {

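Callers now receive the (possibly rewritten) reference as the first return value, since source policies can redirect a ref during resolution. A hedged sketch of a caller, assuming the package's Default() resolver and an example image name:

	import (
		"context"

		"github.com/moby/buildkit/client/llb"
		"github.com/moby/buildkit/client/llb/imagemetaresolver"
	)

	func resolve(ctx context.Context) (string, []byte, error) {
		r := imagemetaresolver.Default()
		// the returned ref may differ from the input if a source policy rewrote it
		ref, dgst, config, err := r.ResolveImageConfig(ctx, "docker.io/library/busybox:latest", llb.ResolveImageConfigOpt{})
		if err != nil {
			return "", nil, err
		}
		_ = dgst // canonical digest of the resolved manifest
		return ref, config, nil
	}
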
+ 26 - 1
vendor/github.com/moby/buildkit/client/llb/merge.go

@@ -70,6 +70,31 @@ func (m *MergeOp) Inputs() []Output {
 	return m.inputs
 }
 
+// Merge merges multiple states into a single state. This is useful in
+// conjunction with [Diff] to create a set of patches which are independent of
+// each other to a base state without affecting the cache of other merged
+// states.
+// As an example, let's say you have a rootfs with the following directories:
+//
+//	/ /bin /etc /opt /tmp
+//
+// Now let's say you want to copy a directory /etc/foo from one state and a
+// binary /bin/bar from another state.
+// [Copy] makes a duplicate of a file on top of another directory.
+// Merge creates a directory whose contents is an overlay of 2 states on top of each other.
+//
+// With "Merge" you can do this:
+//
+//	fooDiff := Diff(rootfs, fooState)
+//	barDiff := Diff(rootfs, barState)
+//
+// Then merge the results with:
+//
+//	Merge(rootfs, fooDiff, barDiff)
+//
+// The resulting state will have both /etc/foo and /bin/bar, but because Merge
+// was used, changing the contents of "fooDiff" does not require copying
+// "barDiff" again.
 func Merge(inputs []State, opts ...ConstraintsOpt) State {
 	// filter out any scratch inputs, which have no effect when merged
 	var filteredInputs []State
@@ -92,5 +117,5 @@ func Merge(inputs []State, opts ...ConstraintsOpt) State {
 		o.SetConstraintsOption(&c)
 	}
 	addCap(&c, pb.CapMergeOp)
-	return NewState(NewMerge(filteredInputs, c).Output())
+	return filteredInputs[0].WithOutput(NewMerge(filteredInputs, c).Output())
 }

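A compact sketch of the Diff/Merge workflow the comment describes, with invented make targets:

	import "github.com/moby/buildkit/client/llb"

	func patched(rootfs llb.State) llb.State {
		// each patch is computed against the same base...
		fooDiff := llb.Diff(rootfs, rootfs.Run(llb.Shlex("make install-foo")).Root())
		barDiff := llb.Diff(rootfs, rootfs.Run(llb.Shlex("make install-bar")).Root())

		// ...so rebuilding one patch does not invalidate the other
		return llb.Merge([]llb.State{rootfs, fooDiff, barDiff})
	}
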
+ 26 - 1
vendor/github.com/moby/buildkit/client/llb/meta.go

@@ -10,6 +10,7 @@ import (
 	"github.com/google/shlex"
 	"github.com/google/shlex"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/solver/pb"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
 )
 )
 
 
 type contextKeyT string
 type contextKeyT string
@@ -29,10 +30,15 @@ var (
 	keySecurity = contextKeyT("llb.security")
 	keySecurity = contextKeyT("llb.security")
 )
 )
 
 
+// AddEnvf is the same as [AddEnv] but allows for a format string.
+// This is the equivalent of `[State.AddEnvf]`
 func AddEnvf(key, value string, v ...interface{}) StateOption {
 func AddEnvf(key, value string, v ...interface{}) StateOption {
 	return addEnvf(key, value, true, v...)
 	return addEnvf(key, value, true, v...)
 }
 }
 
 
+// AddEnv returns a [StateOption] whichs adds an environment variable to the state.
+// Use this with [State.With] to create a new state with the environment variable set.
+// This is the equivalent of `[State.AddEnv]`
 func AddEnv(key, value string) StateOption {
 func AddEnv(key, value string) StateOption {
 	return addEnvf(key, value, false)
 	return addEnvf(key, value, false)
 }
 }
@@ -52,10 +58,14 @@ func addEnvf(key, value string, replace bool, v ...interface{}) StateOption {
 	}
 	}
 }
 }
 
 
+// Dir returns a [StateOption] sets the working directory for the state which will be used to resolve
+// relative paths as well as the working directory for [State.Run].
+// See [State.With] for where to use this.
 func Dir(str string) StateOption {
 func Dir(str string) StateOption {
 	return dirf(str, false)
 	return dirf(str, false)
 }
 }
 
 
+// Dirf is the same as [Dir] but allows for a format string.
 func Dirf(str string, v ...interface{}) StateOption {
 func Dirf(str string, v ...interface{}) StateOption {
 	return dirf(str, true, v...)
 	return dirf(str, true, v...)
 }
 }
@@ -69,7 +79,7 @@ func dirf(value string, replace bool, v ...interface{}) StateOption {
 			if !path.IsAbs(value) {
 			if !path.IsAbs(value) {
 				prev, err := getDir(s)(ctx, c)
 				prev, err := getDir(s)(ctx, c)
 				if err != nil {
 				if err != nil {
-					return nil, err
+					return nil, errors.Wrap(err, "getting dir from state")
 				}
 				}
 				if prev == "" {
 				if prev == "" {
 					prev = "/"
 					prev = "/"
@@ -81,12 +91,18 @@ func dirf(value string, replace bool, v ...interface{}) StateOption {
 	}
 	}
 }
 }
 
 
+// User returns a [StateOption] which sets the user for the state which will be used by [State.Run].
+// This is the equivalent of [State.User]
+// See [State.With] for where to use this.
 func User(str string) StateOption {
 func User(str string) StateOption {
 	return func(s State) State {
 	return func(s State) State {
 		return s.WithValue(keyUser, str)
 		return s.WithValue(keyUser, str)
 	}
 	}
 }
 }
 
 
+// Reset returns a [StateOption] which creates a new [State] with just the
+// output of the current [State] and the provided [State] is set as the parent.
+// This is the equivalent of [State.Reset]
 func Reset(other State) StateOption {
 func Reset(other State) StateOption {
 	return func(s State) State {
 	return func(s State) State {
 		s = NewState(s.Output())
 		s = NewState(s.Output())
@@ -147,6 +163,9 @@ func getUser(s State) func(context.Context, *Constraints) (string, error) {
 	}
 	}
 }
 }
 
 
+// Hostname returns a [StateOption] which sets the hostname used for containers created by [State.Run].
+// This is the equivalent of [State.Hostname]
+// See [State.With] for where to use this.
 func Hostname(str string) StateOption {
 func Hostname(str string) StateOption {
 	return func(s State) State {
 	return func(s State) State {
 		return s.WithValue(keyHostname, str)
 		return s.WithValue(keyHostname, str)
@@ -283,6 +302,9 @@ func getCgroupParent(s State) func(context.Context, *Constraints) (string, error
 	}
 	}
 }
 }
 
 
+// Network returns a [StateOption] which sets the network mode used for containers created by [State.Run].
+// This is the equivalent of [State.Network]
+// See [State.With] for where to use this.
 func Network(v pb.NetMode) StateOption {
 func Network(v pb.NetMode) StateOption {
 	return func(s State) State {
 	return func(s State) State {
 		return s.WithValue(keyNetwork, v)
 		return s.WithValue(keyNetwork, v)
@@ -302,6 +324,9 @@ func getNetwork(s State) func(context.Context, *Constraints) (pb.NetMode, error)
 	}
 	}
 }
 }
 
 
+// Security returns a [StateOption] which sets the security mode used for containers created by [State.Run].
+// This is the equivalent of [State.Security]
+// See [State.With] for where to use this.
 func Security(v pb.SecurityMode) StateOption {
 func Security(v pb.SecurityMode) StateOption {
 	return func(s State) State {
 	return func(s State) State {
 		return s.WithValue(keySecurity, v)
 		return s.WithValue(keySecurity, v)

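Each helper here returns a StateOption, so they compose through State.With; a brief sketch:

	import (
		"github.com/moby/buildkit/client/llb"
		"github.com/moby/buildkit/solver/pb"
	)

	func configured(s llb.State) llb.State {
		return s.With(
			llb.AddEnv("PATH", "/usr/local/bin:/usr/bin:/bin"),
			llb.Dir("/workspace"),
			llb.User("nobody"),
			llb.Hostname("builder"),
			llb.Network(pb.NetMode_NONE),
		)
	}
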
+ 4 - 1
vendor/github.com/moby/buildkit/client/llb/resolver.go

@@ -3,6 +3,7 @@ package llb
 import (
 	"context"
 
+	spb "github.com/moby/buildkit/sourcepolicy/pb"
 	digest "github.com/opencontainers/go-digest"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 )
@@ -31,7 +32,7 @@ func WithLayerLimit(l int) ImageOption {
 
 // ImageMetaResolver can resolve image config metadata from a reference
 type ImageMetaResolver interface {
-	ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error)
+	ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (string, digest.Digest, []byte, error)
 }
 
 type ResolverType int
@@ -49,6 +50,8 @@ type ResolveImageConfigOpt struct {
 	LogName     string
 
 	Store ResolveImageConfigOptStore
+
+	SourcePolicies []*spb.Policy
 }
 
 type ResolveImageConfigOptStore struct {

+ 26 - 2
vendor/github.com/moby/buildkit/client/llb/source.go

@@ -91,6 +91,10 @@ func (s *SourceOp) Inputs() []Output {
 	return nil
 }
 
+// Image returns a state that represents a docker image in a registry.
+// Example:
+//
+//	st := llb.Image("busybox:latest")
 func Image(ref string, opts ...ImageOption) State {
 	r, err := reference.ParseNormalizedNamed(ref)
 	if err == nil {
@@ -131,7 +135,7 @@ func Image(ref string, opts ...ImageOption) State {
 				if p == nil {
 					p = c.Platform
 				}
-				_, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{
+				_, _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{
 					Platform:     p,
 					ResolveMode:  info.resolveMode.String(),
 					ResolverType: ResolverTypeRegistry,
@@ -147,7 +151,7 @@ func Image(ref string, opts ...ImageOption) State {
 			if p == nil {
 				p = c.Platform
 			}
-			dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{
+			ref, dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{
 				Platform:     p,
 				ResolveMode:  info.resolveMode.String(),
 				ResolverType: ResolverTypeRegistry,
@@ -155,6 +159,10 @@ func Image(ref string, opts ...ImageOption) State {
 			if err != nil {
 				return State{}, err
 			}
+			r, err := reference.ParseNormalizedNamed(ref)
+			if err != nil {
+				return State{}, err
+			}
 			if dgst != "" {
 				r, err = reference.WithDigest(r, dgst)
 				if err != nil {
@@ -215,6 +223,20 @@ type ImageInfo struct {
 	RecordType    string
 }
 
+// Git returns a state that represents a git repository.
+// Example:
+//
+//	st := llb.Git("https://github.com/moby/buildkit.git", "v0.11.6")
+//
+// The example fetches the v0.11.6 tag of the buildkit repository.
+// You can also use a commit hash or a branch name.
+//
+// Other URL formats are supported such as "git@github.com:moby/buildkit.git", "git://...", "ssh://..."
+// Formats that utilize SSH may need to supply credentials as a [GitOption].
+// You may need to check the source code for a full list of supported formats.
+//
+// By default the git repository is cloned with `--depth=1` to reduce the amount of data downloaded.
+// Additionally the ".git" directory is removed after the clone, you can keep it with the [KeepGitDir] [GitOption].
 func Git(remote, ref string, opts ...GitOption) State {
 	url := strings.Split(remote, "#")[0]
 
@@ -346,10 +368,12 @@ func MountSSHSock(sshID string) GitOption {
 	})
 }
 
+// Scratch returns a state that represents an empty filesystem.
 func Scratch() State {
 	return NewState(nil)
 }
 
+// Local returns a state that represents a directory local to the client.
 func Local(name string, opts ...LocalOption) State {
 	gi := &LocalInfo{}
 

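A small usage sketch of the documented two-argument form (remote, then ref), keeping the .git directory after the shallow clone:

	import "github.com/moby/buildkit/client/llb"

	func sources() llb.State {
		// tag v0.11.6 resolved at build time; .git kept for tooling that needs it
		repo := llb.Git("https://github.com/moby/buildkit.git", "v0.11.6", llb.KeepGitDir())
		// "src" names a client-side directory in the local build context
		_ = llb.Local("src")
		return repo
	}
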
+ 16 - 2
vendor/github.com/moby/buildkit/client/llb/sourcemap.go

@@ -7,17 +7,30 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	digest "github.com/opencontainers/go-digest"
 )
 )
 
 
+// SourceMap maps a source file/location to an LLB state/definition.
+// SourceMaps are used to provide information for debugging and helpful error messages to the user.
+// As an example, lets say you have a Dockerfile with the following content:
+//
+//	FROM alpine
+//	RUN exit 1
+//
+// When the "RUN" statement exits with a non-zero exit code buildkit will treat
+// it as an error and is able to provide the user with a helpful error message
+// pointing to exactly the line in the Dockerfile that caused the error.
 type SourceMap struct {
 type SourceMap struct {
 	State      *State
 	State      *State
 	Definition *Definition
 	Definition *Definition
 	Filename   string
 	Filename   string
-	Data       []byte
+	// Language should use names defined in https://github.com/github/linguist/blob/v7.24.1/lib/linguist/languages.yml
+	Language string
+	Data     []byte
 }
 }
 
 
-func NewSourceMap(st *State, filename string, dt []byte) *SourceMap {
+func NewSourceMap(st *State, filename string, lang string, dt []byte) *SourceMap {
 	return &SourceMap{
 	return &SourceMap{
 		State:    st,
 		State:    st,
 		Filename: filename,
 		Filename: filename,
+		Language: lang,
 		Data:     dt,
 		Data:     dt,
 	}
 	}
 }
 }
@@ -82,6 +95,7 @@ func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt
 		info := &pb.SourceInfo{
 		info := &pb.SourceInfo{
 			Data:     m.Data,
 			Data:     m.Data,
 			Filename: m.Filename,
 			Filename: m.Filename,
+			Language: m.Language,
 		}
 		}
 
 
 		if def != nil {
 		if def != nil {

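With the new Language field a frontend can tag the source it forwards for error reporting. A hedged sketch; the inline Dockerfile and the Location wiring are illustrative only:

	import (
		"github.com/moby/buildkit/client/llb"
		"github.com/moby/buildkit/solver/pb"
	)

	func sourceMapped() llb.State {
		dockerfile := []byte("FROM alpine\nRUN exit 1\n")
		// "Dockerfile" is a language name from github/linguist's languages.yml
		sm := llb.NewSourceMap(nil, "Dockerfile", "Dockerfile", dockerfile)

		// errors from this exec should point at line 2 of the forwarded file
		return llb.Image("alpine").Run(
			llb.Shlex("exit 1"),
			sm.Location([]*pb.Range{{Start: pb.Position{Line: 2}}}),
		).Root()
	}
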
+ 65 - 0
vendor/github.com/moby/buildkit/client/llb/state.go

@@ -49,6 +49,12 @@ func NewState(o Output) State {
 	return s
 }
 
+// State represents all operations that must be done to produce a given output.
+// States are immutable, and all operations return a new state linked to the previous one.
+// State is the core type of the LLB API and is used to build a graph of operations.
+// The graph is then marshaled into a definition that can be executed by a backend (such as buildkitd).
+//
+// Operations performed on a State are executed lazily after the entire state graph is marshalled and sent to the backend.
 type State struct {
 	out   Output
 	prev  *State
@@ -123,6 +129,7 @@ func (s State) SetMarshalDefaults(co ...ConstraintsOpt) State {
 	return s
 }
 
+// Marshal marshals the state and all its parents into a [Definition].
 func (s State) Marshal(ctx context.Context, co ...ConstraintsOpt) (*Definition, error) {
 	c := NewConstraints(append(s.opts, co...)...)
 	def := &Definition{
@@ -208,10 +215,13 @@ func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollect
 	return def, nil
 }
 
+// Validate validates the state.
+// This validation, unlike most other operations on [State], is not lazily performed.
 func (s State) Validate(ctx context.Context, c *Constraints) error {
 	return s.Output().Vertex(ctx, c).Validate(ctx, c)
 }
 
+// Output returns the output of the state.
 func (s State) Output() Output {
 	if s.async != nil {
 		return s.async.Output()
@@ -219,6 +229,7 @@ func (s State) Output() Output {
 	return s.out
 }
 
+// WithOutput creates a new state with the output set to the given output.
 func (s State) WithOutput(o Output) State {
 	prev := s
 	s = State{
@@ -229,6 +240,7 @@ func (s State) WithOutput(o Output) State {
 	return s
 }
 
+// WithImageConfig adds the environment variables, working directory, and platform specified in the image config to the state.
 func (s State) WithImageConfig(c []byte) (State, error) {
 	var img ocispecs.Image
 	if err := json.Unmarshal(c, &img); err != nil {
@@ -255,6 +267,12 @@ func (s State) WithImageConfig(c []byte) (State, error) {
 	return s, nil
 }
 
+// Run performs the command specified by the arguments within the context of the current [State].
+// The command is executed as a container with the [State]'s filesystem as the root filesystem.
+// As such any command you run must be present in the [State]'s filesystem.
+// Constraints such as [State.AddUlimit], [State.WithCgroupParent], [State.Network], etc. are applied to the container.
+//
+// Run is useful when none of the LLB ops are sufficient for the operation that you want to perform.
 func (s State) Run(ro ...RunOption) ExecState {
 	ei := &ExecInfo{State: s}
 	for _, o := range ro {
@@ -273,6 +291,8 @@ func (s State) Run(ro ...RunOption) ExecState {
 	}
 }
 
+// File performs a file operation on the current state.
+// See [FileAction] for details on the operations that can be performed.
 func (s State) File(a *FileAction, opts ...ConstraintsOpt) State {
 	var c Constraints
 	for _, o := range opts {
@@ -282,21 +302,29 @@ func (s State) File(a *FileAction, opts ...ConstraintsOpt) State {
 	return s.WithOutput(NewFileOp(s, a, c).Output())
 }
 
+// AddEnv returns a new [State] with the provided environment variable set.
+// See [AddEnv]
 func (s State) AddEnv(key, value string) State {
 	return AddEnv(key, value)(s)
 }
 
+// AddEnvf is the same as [State.AddEnv] but with a format string.
 func (s State) AddEnvf(key, value string, v ...interface{}) State {
 	return AddEnvf(key, value, v...)(s)
 }
 
+// Dir returns a new [State] with the provided working directory set.
+// See [Dir]
 func (s State) Dir(str string) State {
 	return Dir(str)(s)
 }
+
+// Dirf is the same as [State.Dir] but with a format string.
 func (s State) Dirf(str string, v ...interface{}) State {
 	return Dirf(str, v...)(s)
 }
 
+// GetEnv returns the value of the environment variable with the provided key.
 func (s State) GetEnv(ctx context.Context, key string, co ...ConstraintsOpt) (string, bool, error) {
 	c := &Constraints{}
 	for _, f := range co {
@@ -310,6 +338,8 @@ func (s State) GetEnv(ctx context.Context, key string, co ...ConstraintsOpt) (st
 	return v, ok, nil
 }
 
+// Env returns the current environment for the state.
+// See [AddEnv] for setting environment variables.
 func (s State) Env(ctx context.Context, co ...ConstraintsOpt) ([]string, error) {
 	c := &Constraints{}
 	for _, f := range co {
@@ -322,6 +352,7 @@ func (s State) Env(ctx context.Context, co ...ConstraintsOpt) ([]string, error)
 	return env.ToArray(), nil
 }
 
+// GetDir returns the current working directory for the state.
 func (s State) GetDir(ctx context.Context, co ...ConstraintsOpt) (string, error) {
 	c := &Constraints{}
 	for _, f := range co {
@@ -338,18 +369,28 @@ func (s State) GetArgs(ctx context.Context, co ...ConstraintsOpt) ([]string, err
 	return getArgs(s)(ctx, c)
 }
 
+// Reset is used to return a new [State] with all of the current state and the
+// provided [State] as the parent. In effect you can think of this as creating
+// a new state with all the output from the current state but reparented to the
+// provided state. See [Reset] for more details.
 func (s State) Reset(s2 State) State {
 	return Reset(s2)(s)
 }
 
+// User sets the user for this state.
+// See [User] for more details.
 func (s State) User(v string) State {
 	return User(v)(s)
 }
 
+// Hostname sets the hostname for this state.
+// See [Hostname] for more details.
 func (s State) Hostname(v string) State {
 	return Hostname(v)(s)
 }
 
+// GetHostname returns the hostname set on the state.
+// See [Hostname] for more details.
 func (s State) GetHostname(ctx context.Context, co ...ConstraintsOpt) (string, error) {
 	c := &Constraints{}
 	for _, f := range co {
@@ -358,10 +399,14 @@ func (s State) GetHostname(ctx context.Context, co ...ConstraintsOpt) (string, e
 	return getHostname(s)(ctx, c)
 }
 
+// Platform sets the platform for the state. Platforms are used to determine
+// image variants to pull and run as well as the platform metadata to set on the
+// image config.
 func (s State) Platform(p ocispecs.Platform) State {
 	return platform(p)(s)
 }
 
+// GetPlatform returns the platform for the state.
 func (s State) GetPlatform(ctx context.Context, co ...ConstraintsOpt) (*ocispecs.Platform, error) {
 	c := &Constraints{}
 	for _, f := range co {
@@ -370,10 +415,14 @@ func (s State) GetPlatform(ctx context.Context, co ...ConstraintsOpt) (*ocispecs
 	return getPlatform(s)(ctx, c)
 }
 
+// Network sets the network mode for the state.
+// Network modes are used by [State.Run] to determine the network mode used when running the container.
+// Network modes are not applied to image configs.
 func (s State) Network(n pb.NetMode) State {
 	return Network(n)(s)
 }
 
+// GetNetwork returns the network mode for the state.
 func (s State) GetNetwork(ctx context.Context, co ...ConstraintsOpt) (pb.NetMode, error) {
 	c := &Constraints{}
 	for _, f := range co {
@@ -381,10 +430,15 @@ func (s State) GetNetwork(ctx context.Context, co ...ConstraintsOpt) (pb.NetMode
 	}
 	return getNetwork(s)(ctx, c)
 }
+
+// Security sets the security mode for the state.
+// Security modes are used by [State.Run] to set the privileges that processes in the container will run with.
+// Security modes are not applied to image configs.
 func (s State) Security(n pb.SecurityMode) State {
 	return Security(n)(s)
 }
 
+// GetSecurity returns the security mode for the state.
 func (s State) GetSecurity(ctx context.Context, co ...ConstraintsOpt) (pb.SecurityMode, error) {
 	c := &Constraints{}
 	for _, f := range co {
@@ -393,6 +447,8 @@ func (s State) GetSecurity(ctx context.Context, co ...ConstraintsOpt) (pb.Securi
 	return getSecurity(s)(ctx, c)
 }
 
+// With applies [StateOption]s to the [State].
+// Each applied [StateOption] creates a new [State] object with the previous as its parent.
 func (s State) With(so ...StateOption) State {
 	for _, o := range so {
 		s = o(s)
@@ -400,14 +456,23 @@ func (s State) With(so ...StateOption) State {
 	return s
 }
 
+// AddExtraHost adds a host name to IP mapping to any containers created from this state.
 func (s State) AddExtraHost(host string, ip net.IP) State {
 	return extraHost(host, ip)(s)
 }
 
+// AddUlimit sets the hard/soft for the given ulimit.
+// The ulimit is applied to containers created from this state.
+// Ulimits are Linux specific and only apply to containers created from this state such as via `[State.Run]`
+// Ulimits do not apply to image configs.
 func (s State) AddUlimit(name UlimitName, soft int64, hard int64) State {
 	return ulimit(name, soft, hard)(s)
 }
 
+// WithCgroupParent sets the parent cgroup for any containers created from this state.
+// This is useful when you want to apply resource constraints to a group of containers.
+// Cgroups are Linux specific and only apply to containers created from this state such as via `[State.Run]`
+// Cgroups do not apply to image configs.
 func (s State) WithCgroupParent(cp string) State {
 	return cgroupParent(cp)(s)
 }

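Pulling the documented methods together: each call derives a new immutable State, and nothing executes until the graph is marshaled. A sketch:

	import (
		"context"

		"github.com/moby/buildkit/client/llb"
	)

	func definition(ctx context.Context) (*llb.Definition, error) {
		st := llb.Image("golang:1.20-alpine").
			AddEnv("CGO_ENABLED", "0").
			Dir("/src").
			Run(llb.Shlex("go build -o /out/app ./...")).Root()

		// the graph is only evaluated once sent to a backend such as buildkitd
		return st.Marshal(ctx)
	}
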
+ 32 - 15
vendor/github.com/moby/buildkit/client/ociindex/ociindex.go

@@ -20,15 +20,18 @@ const (
 )
 
 type StoreIndex struct {
-	indexPath string
-	lockPath  string
+	indexPath  string
+	lockPath   string
+	layoutPath string
 }
 
 func NewStoreIndex(storePath string) StoreIndex {
 	indexPath := path.Join(storePath, indexFile)
+	layoutPath := path.Join(storePath, ocispecs.ImageLayoutFile)
 	return StoreIndex{
-		indexPath: indexPath,
-		lockPath:  indexPath + lockFileSuffix,
+		indexPath:  indexPath,
+		lockPath:   indexPath + lockFileSuffix,
+		layoutPath: layoutPath,
 	}
 }
 
@@ -58,6 +61,7 @@ func (s StoreIndex) Read() (*ocispecs.Index, error) {
 }
 
 func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error {
+	// lock the store to prevent concurrent access
 	lock := flock.New(s.lockPath)
 	locked, err := lock.TryLock()
 	if err != nil {
@@ -71,20 +75,33 @@ func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error {
 		os.RemoveAll(s.lockPath)
 	}()
 
-	f, err := os.OpenFile(s.indexPath, os.O_RDWR|os.O_CREATE, 0644)
+	// create the oci-layout file
+	layout := ocispecs.ImageLayout{
+		Version: ocispecs.ImageLayoutVersion,
+	}
+	layoutData, err := json.Marshal(layout)
+	if err != nil {
+		return err
+	}
+	if err := os.WriteFile(s.layoutPath, layoutData, 0644); err != nil {
+		return err
+	}
+
+	// modify the index file
+	idxFile, err := os.OpenFile(s.indexPath, os.O_RDWR|os.O_CREATE, 0644)
 	if err != nil {
 		return errors.Wrapf(err, "could not open %s", s.indexPath)
 	}
-	defer f.Close()
+	defer idxFile.Close()
 
 	var idx ocispecs.Index
-	b, err := io.ReadAll(f)
+	idxData, err := io.ReadAll(idxFile)
 	if err != nil {
 		return errors.Wrapf(err, "could not read %s", s.indexPath)
 	}
-	if len(b) > 0 {
-		if err := json.Unmarshal(b, &idx); err != nil {
-			return errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b))
+	if len(idxData) > 0 {
+		if err := json.Unmarshal(idxData, &idx); err != nil {
+			return errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(idxData))
 		}
 	}
 
@@ -92,15 +109,15 @@ func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error {
 		return err
 	}
 
-	b, err = json.Marshal(idx)
+	idxData, err = json.Marshal(idx)
 	if err != nil {
 		return err
 	}
-	if _, err = f.WriteAt(b, 0); err != nil {
-		return err
+	if _, err = idxFile.WriteAt(idxData, 0); err != nil {
+		return errors.Wrapf(err, "could not write %s", s.indexPath)
 	}
-	if err = f.Truncate(int64(len(b))); err != nil {
-		return err
+	if err = idxFile.Truncate(int64(len(idxData))); err != nil {
+		return errors.Wrapf(err, "could not truncate %s", s.indexPath)
 	}
 	return nil
 }

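For reference, the oci-layout marker that Put now writes beside index.json is fixed by the OCI image-layout spec; marshaling ocispecs.ImageLayout{Version: ocispecs.ImageLayoutVersion} produces exactly:

	{"imageLayoutVersion":"1.0.0"}
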
+ 1 - 1
vendor/github.com/moby/buildkit/client/solve.go

@@ -169,7 +169,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 		}
 
 		if supportFile && supportDir {
-			return nil, errors.Errorf("both file and directory output is not support by %s exporter", ex.Type)
+			return nil, errors.Errorf("both file and directory output is not supported by %s exporter", ex.Type)
 		}
 		if !supportFile && ex.Output != nil {
 			return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)

+ 8 - 7
vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go

@@ -7,6 +7,7 @@ import (
 // Config provides containerd configuration data for the server
 type Config struct {
 	Debug bool `toml:"debug"`
+	Trace bool `toml:"trace"`
 
 	// Root is the path to a directory where buildkit will store persistent data
 	Root string `toml:"root"`
@@ -47,7 +48,7 @@ type TLSConfig struct {
 
 type GCConfig struct {
 	GC            *bool      `toml:"gc"`
-	GCKeepStorage int64      `toml:"gckeepstorage"`
+	GCKeepStorage DiskSpace  `toml:"gckeepstorage"`
 	GCPolicy      []GCPolicy `toml:"gcpolicy"`
 }
 
@@ -114,10 +115,10 @@ type ContainerdConfig struct {
 }
 
 type GCPolicy struct {
-	All          bool     `toml:"all"`
-	KeepBytes    int64    `toml:"keepBytes"`
-	KeepDuration int64    `toml:"keepDuration"`
-	Filters      []string `toml:"filters"`
+	All          bool      `toml:"all"`
+	KeepBytes    DiskSpace `toml:"keepBytes"`
+	KeepDuration Duration  `toml:"keepDuration"`
+	Filters      []string  `toml:"filters"`
 }
 
 type DNSConfig struct {
@@ -127,6 +128,6 @@ type DNSConfig struct {
 }
 
 type HistoryConfig struct {
-	MaxAge     int64 `toml:"maxAge"`
-	MaxEntries int64 `toml:"maxEntries"`
+	MaxAge     Duration `toml:"maxAge"`
+	MaxEntries int64    `toml:"maxEntries"`
 }

+ 81 - 6
vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go

@@ -1,21 +1,86 @@
 package config
 package config
 
 
+import (
+	"encoding"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/go-units"
+	"github.com/pkg/errors"
+)
+
+type Duration struct {
+	time.Duration
+}
+
+func (d *Duration) UnmarshalText(textb []byte) error {
+	text := stripQuotes(string(textb))
+	if len(text) == 0 {
+		return nil
+	}
+
+	if duration, err := time.ParseDuration(text); err == nil {
+		d.Duration = duration
+		return nil
+	}
+
+	if i, err := strconv.ParseInt(text, 10, 64); err == nil {
+		d.Duration = time.Duration(i) * time.Second
+		return nil
+	}
+
+	return errors.Errorf("invalid duration %s", text)
+}
+
+var _ encoding.TextUnmarshaler = &Duration{}
+
+type DiskSpace struct {
+	Bytes      int64
+	Percentage int64
+}
+
+var _ encoding.TextUnmarshaler = &DiskSpace{}
+
+func (d *DiskSpace) UnmarshalText(textb []byte) error {
+	text := stripQuotes(string(textb))
+	if len(text) == 0 {
+		return nil
+	}
+
+	if text2 := strings.TrimSuffix(text, "%"); len(text2) < len(text) {
+		i, err := strconv.ParseInt(text2, 10, 64)
+		if err != nil {
+			return err
+		}
+		d.Percentage = i
+		return nil
+	}
+
+	if i, err := units.RAMInBytes(text); err == nil {
+		d.Bytes = i
+		return nil
+	}
+
+	return errors.Errorf("invalid disk space %s", text)
+}
+
 const defaultCap int64 = 2e9 // 2GB
 const defaultCap int64 = 2e9 // 2GB
 
 
-func DefaultGCPolicy(p string, keep int64) []GCPolicy {
-	if keep == 0 {
-		keep = DetectDefaultGCCap(p)
+func DefaultGCPolicy(keep DiskSpace) []GCPolicy {
+	if keep == (DiskSpace{}) {
+		keep = DetectDefaultGCCap()
 	}
 	}
 	return []GCPolicy{
 	return []GCPolicy{
 		// if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days
 		// if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days
 		{
 		{
 			Filters:      []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"},
 			Filters:      []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"},
-			KeepDuration: 48 * 3600, // 48h
-			KeepBytes:    512 * 1e6, // 512MB
+			KeepDuration: Duration{Duration: time.Duration(48) * time.Hour}, // 48h
+			KeepBytes:    DiskSpace{Bytes: 512 * 1e6},                       // 512MB
 		},
 		// remove any data not used for 60 days
 		{
-			KeepDuration: 60 * 24 * 3600, // 60d
+			KeepDuration: Duration{Duration: time.Duration(60) * 24 * time.Hour}, // 60d
 			KeepBytes:    keep,
 		},
 		// keep the unshared build cache under cap
@@ -29,3 +94,13 @@ func DefaultGCPolicy(p string, keep int64) []GCPolicy {
 		},
 	}
 }
+
+func stripQuotes(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	if s[0] == '"' && s[len(s)-1] == '"' {
+		return s[1 : len(s)-1]
+	}
+	return s
+}
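
The new Duration and DiskSpace text-unmarshalers above accept either a bare integer (interpreted as seconds or bytes), a unit-suffixed string, or, for disk space, a percentage. A minimal standalone sketch of the resulting behaviour (illustration only, not part of the vendored diff):

    package main

    import (
        "fmt"

        "github.com/moby/buildkit/cmd/buildkitd/config"
    )

    func main() {
        var d config.Duration
        _ = d.UnmarshalText([]byte("48h")) // parsed via time.ParseDuration
        var ds config.DiskSpace
        _ = ds.UnmarshalText([]byte("10%")) // stored in Percentage, not Bytes
        fmt.Println(d.Duration, ds.Percentage) // 48h0m0s 10
    }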

+ 13 - 2
vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go

@@ -7,12 +7,23 @@ import (
 	"syscall"
 	"syscall"
 )
 )
 
 
-func DetectDefaultGCCap(root string) int64 {
+func DetectDefaultGCCap() DiskSpace {
+	return DiskSpace{Percentage: 10}
+}
+
+func (d DiskSpace) AsBytes(root string) int64 {
+	if d.Bytes != 0 {
+		return d.Bytes
+	}
+	if d.Percentage == 0 {
+		return 0
+	}
+
 	var st syscall.Statfs_t
 	if err := syscall.Statfs(root, &st); err != nil {
 		return defaultCap
 	}
 	diskSize := int64(st.Bsize) * int64(st.Blocks)
-	avail := diskSize / 10
+	avail := diskSize * d.Percentage / 100
 	return (avail/(1<<30) + 1) * 1e9 // round up
 }
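
As a worked example of the rounding above: on a 100 GiB filesystem with the default DiskSpace{Percentage: 10}, avail = 107374182400 * 10 / 100 = 10737418240 bytes, and (10737418240/(1<<30) + 1) * 1e9 = 11e9, so the cap lands at roughly 11 GB.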

+ 6 - 2
vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go

@@ -3,6 +3,10 @@
 
 package config
 
-func DetectDefaultGCCap(root string) int64 {
-	return defaultCap
+func DetectDefaultGCCap() DiskSpace {
+	return DiskSpace{Bytes: defaultCap}
+}
+
+func (d DiskSpace) AsBytes(root string) int64 {
+	return d.Bytes
 }

+ 42 - 33
vendor/github.com/moby/buildkit/control/control.go

@@ -10,7 +10,6 @@ import (
 
 	contentapi "github.com/containerd/containerd/api/services/content/v1"
 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/services/content/contentserver"
 	"github.com/docker/distribution/reference"
 	"github.com/mitchellh/hashstructure/v2"
@@ -18,20 +17,24 @@ import (
 	apitypes "github.com/moby/buildkit/api/types"
 	"github.com/moby/buildkit/cache/remotecache"
 	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/cmd/buildkitd/config"
 	controlgateway "github.com/moby/buildkit/control/gateway"
 	"github.com/moby/buildkit/exporter"
+	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/exporter/util/epoch"
 	"github.com/moby/buildkit/frontend"
 	"github.com/moby/buildkit/frontend/attestations"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session/grpchijack"
+	containerdsnapshot "github.com/moby/buildkit/snapshot/containerd"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/solver/llbsolver"
 	"github.com/moby/buildkit/solver/llbsolver/proc"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/imageutil"
+	"github.com/moby/buildkit/util/leaseutil"
 	"github.com/moby/buildkit/util/throttle"
 	"github.com/moby/buildkit/util/tracing/transform"
 	"github.com/moby/buildkit/version"
@@ -52,14 +55,14 @@ type Opt struct {
 	SessionManager            *session.Manager
 	WorkerController          *worker.Controller
 	Frontends                 map[string]frontend.Frontend
-	CacheKeyStorage           solver.CacheKeyStorage
+	CacheManager              solver.CacheManager
 	ResolveCacheExporterFuncs map[string]remotecache.ResolveCacheExporterFunc
 	ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc
 	Entitlements              []string
 	TraceCollector            sdktrace.SpanExporter
 	HistoryDB                 *bbolt.DB
-	LeaseManager              leases.Manager
-	ContentStore              content.Store
+	LeaseManager              *leaseutil.Manager
+	ContentStore              *containerdsnapshot.Store
 	HistoryConfig             *config.HistoryConfig
 }
 
@@ -77,21 +80,22 @@ type Controller struct { // TODO: ControlService
 }
 
 func NewController(opt Opt) (*Controller, error) {
-	cache := solver.NewCacheManager(context.TODO(), "local", opt.CacheKeyStorage, worker.NewCacheResultStorage(opt.WorkerController))
-
 	gatewayForwarder := controlgateway.NewGatewayForwarder()
 
-	hq := llbsolver.NewHistoryQueue(llbsolver.HistoryQueueOpt{
+	hq, err := llbsolver.NewHistoryQueue(llbsolver.HistoryQueueOpt{
 		DB:           opt.HistoryDB,
 		LeaseManager: opt.LeaseManager,
 		ContentStore: opt.ContentStore,
 		CleanConfig:  opt.HistoryConfig,
 	})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to create history queue")
+	}
 
 	s, err := llbsolver.New(llbsolver.Opt{
 		WorkerController: opt.WorkerController,
 		Frontends:        opt.Frontends,
-		CacheManager:     cache,
+		CacheManager:     opt.CacheManager,
 		CacheResolvers:   opt.ResolveCacheImporterFuncs,
 		GatewayForwarder: gatewayForwarder,
 		SessionManager:   opt.SessionManager,
@@ -106,7 +110,7 @@ func NewController(opt Opt) (*Controller, error) {
 		opt:              opt,
 		solver:           s,
 		history:          hq,
-		cache:            cache,
+		cache:            opt.CacheManager,
 		gatewayForwarder: gatewayForwarder,
 	}
 	c.throttledGC = throttle.After(time.Minute, c.gc)
@@ -127,7 +131,7 @@ func (c *Controller) Register(server *grpc.Server) {
 	c.gatewayForwarder.Register(server)
 	tracev1.RegisterTraceServiceServer(server, c)
 
-	store := &roContentStore{c.opt.ContentStore}
+	store := &roContentStore{c.opt.ContentStore.WithFallbackNS(c.opt.ContentStore.Namespace() + "_history")}
 	contentapi.RegisterContentServer(server, contentserver.New(store))
 }
 
@@ -170,7 +174,7 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr
 		imageutil.CancelCacheLeases()
 	}
 
-	ch := make(chan client.UsageInfo)
+	ch := make(chan client.UsageInfo, 32)
 
 	eg, ctx := errgroup.WithContext(stream.Context())
 	workers, err := c.opt.WorkerController.List()
@@ -182,9 +186,9 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr
 	defer func() {
 		if didPrune {
 			if c, ok := c.cache.(interface {
-				ReleaseUnreferenced() error
+				ReleaseUnreferenced(context.Context) error
 			}); ok {
-				if err := c.ReleaseUnreferenced(); err != nil {
+				if err := c.ReleaseUnreferenced(ctx); err != nil {
 					bklog.G(ctx).Errorf("failed to release cache metadata: %+v", err)
 				}
 			}
@@ -212,6 +216,11 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr
 	})
 
 	eg2.Go(func() error {
+		defer func() {
+			// drain channel on error
+			for range ch {
+			}
+		}()
 		for r := range ch {
 			didPrune = true
 			if err := stream.Send(&controlapi.UsageRecord{
@@ -276,7 +285,7 @@ func (c *Controller) UpdateBuildHistory(ctx context.Context, req *controlapi.Upd
 	return &controlapi.UpdateBuildHistoryResponse{}, err
 }
 
-func translateLegacySolveRequest(req *controlapi.SolveRequest) error {
+func translateLegacySolveRequest(req *controlapi.SolveRequest) {
 	// translates ExportRef and ExportAttrs to new Exports (v0.4.0)
 	if legacyExportRef := req.Cache.ExportRefDeprecated; legacyExportRef != "" {
 		ex := &controlapi.CacheOptionsEntry{
@@ -302,18 +311,13 @@ func translateLegacySolveRequest(req *controlapi.SolveRequest) error {
 		req.Cache.Imports = append(req.Cache.Imports, im)
 	}
 	req.Cache.ImportRefsDeprecated = nil
-	return nil
 }
 
 func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) {
 	atomic.AddInt64(&c.buildCount, 1)
 	defer atomic.AddInt64(&c.buildCount, -1)
 
-	// This method registers job ID in solver.Solve. Make sure there are no blocking calls before that might delay this.
-
-	if err := translateLegacySolveRequest(req); err != nil {
-		return nil, err
-	}
+	translateLegacySolveRequest(req)
 
 	defer func() {
 		time.AfterFunc(time.Second, c.throttledGC)
@@ -329,20 +333,11 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 
 	// if SOURCE_DATE_EPOCH is set, enable it for the exporter
 	if v, ok := epoch.ParseBuildArgs(req.FrontendAttrs); ok {
-		if _, ok := req.ExporterAttrs[epoch.KeySourceDateEpoch]; !ok {
-			if req.ExporterAttrs == nil {
-				req.ExporterAttrs = make(map[string]string)
-			}
-			req.ExporterAttrs[epoch.KeySourceDateEpoch] = v
-		}
-	}
-
-	if v, ok := req.FrontendAttrs["build-arg:BUILDKIT_BUILDINFO"]; ok && v != "" {
-		if _, ok := req.ExporterAttrs["buildinfo"]; !ok {
+		if _, ok := req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)]; !ok {
 			if req.ExporterAttrs == nil {
 				req.ExporterAttrs = make(map[string]string)
 			}
-			req.ExporterAttrs["buildinfo"] = v
+			req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)] = v
 		}
 	}
 
@@ -377,6 +372,10 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to configure %v cache exporter", e.Type)
 		}
+		if exp.Exporter == nil {
+			bklog.G(ctx).Debugf("cache exporter resolver for %v returned nil, skipping exporter", e.Type)
+			continue
+		}
 		if exportMode, supported := parseCacheExportMode(e.Attrs["mode"]); !supported {
 			bklog.G(ctx).Debugf("skipping invalid cache export mode: %s", e.Attrs["mode"])
 		} else {
@@ -416,14 +415,19 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to parse sbom generator %s", src)
 		}
+		ref = reference.TagNameOnly(ref)
 
 		useCache := true
 		if v, ok := req.FrontendAttrs["no-cache"]; ok && v == "" {
 			// disable cache if cache is disabled for all stages
 			useCache = false
 		}
-		ref = reference.TagNameOnly(ref)
-		procs = append(procs, proc.SBOMProcessor(ref.String(), useCache))
+		resolveMode := llb.ResolveModeDefault.String()
+		if v, ok := req.FrontendAttrs["image-resolve-mode"]; ok {
+			resolveMode = v
+		}
+
+		procs = append(procs, proc.SBOMProcessor(ref.String(), useCache, resolveMode))
 	}
 
 	if attrs, ok := attests["provenance"]; ok {
@@ -462,6 +466,11 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con
 	})
 
 	eg.Go(func() error {
+		defer func() {
+			// drain channel on error
+			for range ch {
+			}
+		}()
 		for {
 			ss, ok := <-ch
 			if !ok {
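
The defer/for-range additions above implement a standard errgroup drain pattern: if the sending goroutine keeps producing after the receiver returns early on a stream error, the deferred drain keeps it from blocking forever on the (now buffered) channel. A rough standalone sketch of the idea, with illustrative names:

    package main

    import (
        "fmt"

        "golang.org/x/sync/errgroup"
    )

    func main() {
        ch := make(chan int, 32)
        var eg errgroup.Group
        eg.Go(func() error { // producer
            defer close(ch)
            for i := 0; i < 100; i++ {
                ch <- i
            }
            return nil
        })
        eg.Go(func() error { // consumer
            defer func() {
                for range ch { // drain so the producer can always finish
                }
            }()
            for v := range ch {
                if v == 3 {
                    return fmt.Errorf("send failed at %d", v)
                }
            }
            return nil
        })
        fmt.Println(eg.Wait()) // send failed at 3
    }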

+ 19 - 18
vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go

@@ -21,6 +21,7 @@ import (
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor/oci"
 	"github.com/moby/buildkit/executor/oci"
+	resourcestypes "github.com/moby/buildkit/executor/resources/types"
 	gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
 	gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/snapshot"
 	"github.com/moby/buildkit/snapshot"
@@ -78,7 +79,7 @@ func New(client *containerd.Client, root, cgroup string, networkProviders map[pb
 	}
 	}
 }
 }
 
 
-func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) {
+func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) {
 	if id == "" {
 	if id == "" {
 		id = identity.NewID()
 		id = identity.NewID()
 	}
 	}
@@ -105,12 +106,12 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 
 
 	resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig)
 	resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig)
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
 	}
 	}
 
 
 	hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, meta.Hostname)
 	hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, meta.Hostname)
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
 	}
 	}
 	if clean != nil {
 	if clean != nil {
 		defer clean()
 		defer clean()
@@ -118,12 +119,12 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 
 
 	mountable, err := root.Src.Mount(ctx, false)
 	mountable, err := root.Src.Mount(ctx, false)
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
 	}
 	}
 
 
 	rootMounts, release, err := mountable.Mount()
 	rootMounts, release, err := mountable.Mount()
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
 	}
 	}
 	if release != nil {
 	if release != nil {
 		defer release()
 		defer release()
@@ -132,14 +133,14 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 	lm := snapshot.LocalMounterWithMounts(rootMounts)
 	lm := snapshot.LocalMounterWithMounts(rootMounts)
 	rootfsPath, err := lm.Mount()
 	rootfsPath, err := lm.Mount()
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
 	}
 	}
 	defer lm.Unmount()
 	defer lm.Unmount()
-	defer executor.MountStubsCleaner(rootfsPath, mounts, meta.RemoveMountStubsRecursive)()
+	defer executor.MountStubsCleaner(ctx, rootfsPath, mounts, meta.RemoveMountStubsRecursive)()
 
 
 	uid, gid, sgids, err := oci.GetUser(rootfsPath, meta.User)
 	uid, gid, sgids, err := oci.GetUser(rootfsPath, meta.User)
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
 	}
 	}
 
 
 	identity := idtools.Identity{
 	identity := idtools.Identity{
@@ -149,21 +150,21 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 
 
 	newp, err := fs.RootPath(rootfsPath, meta.Cwd)
 	newp, err := fs.RootPath(rootfsPath, meta.Cwd)
 	if err != nil {
 	if err != nil {
-		return errors.Wrapf(err, "working dir %s points to invalid target", newp)
+		return nil, errors.Wrapf(err, "working dir %s points to invalid target", newp)
 	}
 	}
 	if _, err := os.Stat(newp); err != nil {
 	if _, err := os.Stat(newp); err != nil {
 		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
 		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
-			return errors.Wrapf(err, "failed to create working directory %s", newp)
+			return nil, errors.Wrapf(err, "failed to create working directory %s", newp)
 		}
 		}
 	}
 	}
 
 
 	provider, ok := w.networkProviders[meta.NetMode]
 	provider, ok := w.networkProviders[meta.NetMode]
 	if !ok {
 	if !ok {
-		return errors.Errorf("unknown network mode %s", meta.NetMode)
+		return nil, errors.Errorf("unknown network mode %s", meta.NetMode)
 	}
 	}
 	namespace, err := provider.New(ctx, meta.Hostname)
 	namespace, err := provider.New(ctx, meta.Hostname)
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
 	}
 	}
 	defer namespace.Close()
 	defer namespace.Close()
 
 
@@ -179,13 +180,13 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 	processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
 	processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
 	spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...)
 	spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...)
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
 	}
 	}
 	defer cleanup()
 	defer cleanup()
 	spec.Process.Terminal = meta.Tty
 	spec.Process.Terminal = meta.Tty
 	if w.rootless {
 	if w.rootless {
 		if err := rootlessspecconv.ToRootless(spec); err != nil {
 		if err := rootlessspecconv.ToRootless(spec); err != nil {
-			return err
+			return nil, err
 		}
 		}
 	}
 	}
 
 
@@ -193,7 +194,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 		containerd.WithSpec(spec),
 		containerd.WithSpec(spec),
 	)
 	)
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
 	}
 	}
 
 
 	defer func() {
 	defer func() {
@@ -214,7 +215,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 		Options: []string{"rbind"},
 		Options: []string{"rbind"},
 	}}))
 	}}))
 	if err != nil {
 	if err != nil {
-		return err
+		return nil, err
 	}
 	}
 
 
 	defer func() {
 	defer func() {
@@ -225,7 +226,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 
 
 	if nn, ok := namespace.(OnCreateRuntimer); ok {
 	if nn, ok := namespace.(OnCreateRuntimer); ok {
 		if err := nn.OnCreateRuntime(task.Pid()); err != nil {
 		if err := nn.OnCreateRuntime(task.Pid()); err != nil {
-			return err
+			return nil, err
 		}
 		}
 	}
 	}
 
 
@@ -238,7 +239,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
 			}
 			}
 		})
 		})
 	})
 	})
-	return err
+	return nil, err
 }
 }
 
 
 func (w *containerdExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) (err error) {
 func (w *containerdExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) (err error) {

+ 2 - 1
vendor/github.com/moby/buildkit/executor/executor.go

@@ -6,6 +6,7 @@ import (
 	"net"
 	"net"
 	"syscall"
 	"syscall"
 
 
+	resourcestypes "github.com/moby/buildkit/executor/resources/types"
 	"github.com/moby/buildkit/snapshot"
 	"github.com/moby/buildkit/snapshot"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/solver/pb"
 )
 )
@@ -55,7 +56,7 @@ type Executor interface {
 	// Run will start a container for the given process with rootfs, mounts.
 	// Run will start a container for the given process with rootfs, mounts.
 	// `id` is an optional name for the container so it can be referenced later via Exec.
 	// `id` is an optional name for the container so it can be referenced later via Exec.
 	// `started` is an optional channel that will be closed when the container setup completes and has started running.
 	// `started` is an optional channel that will be closed when the container setup completes and has started running.
-	Run(ctx context.Context, id string, rootfs Mount, mounts []Mount, process ProcessInfo, started chan<- struct{}) error
+	Run(ctx context.Context, id string, rootfs Mount, mounts []Mount, process ProcessInfo, started chan<- struct{}) (resourcestypes.Recorder, error)
 	// Exec will start a process in container matching `id`. An error will be returned
 	// Exec will start a process in container matching `id`. An error will be returned
 	// if the container failed to start (via Run) or has exited before Exec is called.
 	// if the container failed to start (via Run) or has exited before Exec is called.
 	Exec(ctx context.Context, id string, process ProcessInfo) error
 	Exec(ctx context.Context, id string, process ProcessInfo) error
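
Since Run now also returns a resourcestypes.Recorder, every Executor implementation has to adopt the widened signature; as the containerd executor above shows, returning a nil Recorder is acceptable when no resource data is collected. A hypothetical minimal adaptation (sketch only; myExecutor and the remaining Executor methods are assumptions left out here):

    package myexec

    import (
        "context"

        "github.com/moby/buildkit/executor"
        resourcestypes "github.com/moby/buildkit/executor/resources/types"
    )

    type myExecutor struct{} // hypothetical implementation

    func (e *myExecutor) Run(ctx context.Context, id string, rootfs executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (resourcestypes.Recorder, error) {
        if started != nil {
            close(started) // signal that setup completed and the process started
        }
        // ... run the process ...
        return nil, nil // nil Recorder mirrors containerdexecutor above
    }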

+ 2 - 2
vendor/github.com/moby/buildkit/executor/oci/hosts.go

@@ -20,9 +20,9 @@ func GetHostsFile(ctx context.Context, stateDir string, extraHosts []executor.Ho
 		return makeHostsFile(stateDir, extraHosts, idmap, hostname)
 	}
 
-	_, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) {
+	_, err := g.Do(ctx, stateDir, func(ctx context.Context) (struct{}, error) {
 		_, _, err := makeHostsFile(stateDir, nil, idmap, hostname)
-		return nil, err
+		return struct{}{}, err
 	})
 	if err != nil {
 		return "", nil, err

+ 28 - 32
vendor/github.com/moby/buildkit/executor/oci/resolvconf.go

@@ -11,12 +11,12 @@ import (
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 )
 )
 
 
-var g flightcontrol.Group
+var g flightcontrol.Group[struct{}]
 var notFirstRun bool
 var notFirstRun bool
 var lastNotEmpty bool
 var lastNotEmpty bool
 
 
 // overridden by tests
 // overridden by tests
-var resolvconfGet = resolvconf.Get
+var resolvconfPath = resolvconf.Path
 
 
 type DNSConfig struct {
 type DNSConfig struct {
 	Nameservers   []string
 	Nameservers   []string
@@ -26,7 +26,7 @@ type DNSConfig struct {
 
 
 func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.IdentityMapping, dns *DNSConfig) (string, error) {
 func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.IdentityMapping, dns *DNSConfig) (string, error) {
 	p := filepath.Join(stateDir, "resolv.conf")
 	p := filepath.Join(stateDir, "resolv.conf")
-	_, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) {
+	_, err := g.Do(ctx, stateDir, func(ctx context.Context) (struct{}, error) {
 		generate := !notFirstRun
 		generate := !notFirstRun
 		notFirstRun = true
 		notFirstRun = true
 
 
@@ -34,15 +34,15 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
 			fi, err := os.Stat(p)
 			fi, err := os.Stat(p)
 			if err != nil {
 			if err != nil {
 				if !errors.Is(err, os.ErrNotExist) {
 				if !errors.Is(err, os.ErrNotExist) {
-					return "", err
+					return struct{}{}, err
 				}
 				}
 				generate = true
 				generate = true
 			}
 			}
 			if !generate {
 			if !generate {
-				fiMain, err := os.Stat(resolvconf.Path())
+				fiMain, err := os.Stat(resolvconfPath())
 				if err != nil {
 				if err != nil {
 					if !errors.Is(err, os.ErrNotExist) {
 					if !errors.Is(err, os.ErrNotExist) {
-						return nil, err
+						return struct{}{}, err
 					}
 					}
 					if lastNotEmpty {
 					if lastNotEmpty {
 						generate = true
 						generate = true
@@ -57,63 +57,59 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
 		}
 		}
 
 
 		if !generate {
 		if !generate {
-			return "", nil
+			return struct{}{}, nil
 		}
 		}
 
 
-		var dt []byte
-		f, err := resolvconfGet()
-		if err != nil {
-			if !errors.Is(err, os.ErrNotExist) {
-				return "", err
-			}
-		} else {
-			dt = f.Content
+		dt, err := os.ReadFile(resolvconfPath())
+		if err != nil && !errors.Is(err, os.ErrNotExist) {
+			return struct{}{}, err
 		}
 		}
 
 
+		var f *resolvconf.File
+		tmpPath := p + ".tmp"
 		if dns != nil {
 		if dns != nil {
 			var (
 			var (
-				dnsNameservers   = resolvconf.GetNameservers(dt, resolvconf.IP)
-				dnsSearchDomains = resolvconf.GetSearchDomains(dt)
-				dnsOptions       = resolvconf.GetOptions(dt)
+				dnsNameservers   = dns.Nameservers
+				dnsSearchDomains = dns.SearchDomains
+				dnsOptions       = dns.Options
 			)
 			)
-			if len(dns.Nameservers) > 0 {
-				dnsNameservers = dns.Nameservers
+			if len(dns.Nameservers) == 0 {
+				dnsNameservers = resolvconf.GetNameservers(dt, resolvconf.IP)
 			}
 			}
-			if len(dns.SearchDomains) > 0 {
-				dnsSearchDomains = dns.SearchDomains
+			if len(dns.SearchDomains) == 0 {
+				dnsSearchDomains = resolvconf.GetSearchDomains(dt)
 			}
 			}
-			if len(dns.Options) > 0 {
-				dnsOptions = dns.Options
+			if len(dns.Options) == 0 {
+				dnsOptions = resolvconf.GetOptions(dt)
 			}
 			}
 
 
-			f, err = resolvconf.Build(p+".tmp", dnsNameservers, dnsSearchDomains, dnsOptions)
+			f, err = resolvconf.Build(tmpPath, dnsNameservers, dnsSearchDomains, dnsOptions)
 			if err != nil {
 			if err != nil {
-				return "", err
+				return struct{}{}, err
 			}
 			}
 			dt = f.Content
 			dt = f.Content
 		}
 		}
 
 
 		f, err = resolvconf.FilterResolvDNS(dt, true)
 		f, err = resolvconf.FilterResolvDNS(dt, true)
 		if err != nil {
 		if err != nil {
-			return "", err
+			return struct{}{}, err
 		}
 		}
 
 
-		tmpPath := p + ".tmp"
 		if err := os.WriteFile(tmpPath, f.Content, 0644); err != nil {
 		if err := os.WriteFile(tmpPath, f.Content, 0644); err != nil {
-			return "", err
+			return struct{}{}, err
 		}
 		}
 
 
 		if idmap != nil {
 		if idmap != nil {
 			root := idmap.RootPair()
 			root := idmap.RootPair()
 			if err := os.Chown(tmpPath, root.UID, root.GID); err != nil {
 			if err := os.Chown(tmpPath, root.UID, root.GID); err != nil {
-				return "", err
+				return struct{}{}, err
 			}
 			}
 		}
 		}
 
 
 		if err := os.Rename(tmpPath, p); err != nil {
 		if err := os.Rename(tmpPath, p); err != nil {
-			return "", err
+			return struct{}{}, err
 		}
 		}
-		return "", nil
+		return struct{}{}, nil
 	})
 	})
 	if err != nil {
 	if err != nil {
 		return "", err
 		return "", err

+ 14 - 7
vendor/github.com/moby/buildkit/executor/oci/spec.go

@@ -37,6 +37,12 @@ const (
 	NoProcessSandbox
 )
 
+var tracingEnvVars = []string{
+	"OTEL_TRACES_EXPORTER=otlp",
+	"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=" + getTracingSocket(),
+	"OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=grpc",
+}
+
 func (pm ProcessMode) String() string {
 	switch pm {
 	case ProcessSandbox:
@@ -114,7 +120,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
 
 	if tracingSocket != "" {
 		// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md
-		meta.Env = append(meta.Env, "OTEL_TRACES_EXPORTER=otlp", "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=unix:///dev/otel-grpc.sock", "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=grpc")
+		meta.Env = append(meta.Env, tracingEnvVars...)
 		meta.Env = append(meta.Env, traceexec.Environ(ctx)...)
 	}
 
@@ -131,6 +137,12 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
 		return nil, nil, err
 	}
 
+	if cgroupNamespaceSupported() {
+		s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{
+			Type: specs.CgroupNamespace,
+		})
+	}
+
 	if len(meta.Ulimit) == 0 {
 		// reset open files limit
 		s.Process.Rlimits = nil
@@ -185,12 +197,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
 	}
 
 	if tracingSocket != "" {
-		s.Mounts = append(s.Mounts, specs.Mount{
-			Destination: "/dev/otel-grpc.sock",
-			Type:        "bind",
-			Source:      tracingSocket,
-			Options:     []string{"ro", "rbind"},
-		})
+		s.Mounts = append(s.Mounts, getTracingSocketMount(tracingSocket))
 	}
 
 	s.Mounts = dedupMounts(s.Mounts)

+ 33 - 0
vendor/github.com/moby/buildkit/executor/oci/spec_unix.go

@@ -6,7 +6,9 @@ package oci
 import (
 	"context"
 	"fmt"
+	"os"
 	"strings"
+	"sync"
 
 	"github.com/containerd/containerd/containers"
 	"github.com/containerd/containerd/oci"
@@ -21,6 +23,15 @@ import (
 	"github.com/pkg/errors"
 )
 
+var (
+	cgroupNSOnce     sync.Once
+	supportsCgroupNS bool
+)
+
+const (
+	tracingSocketPath = "/dev/otel-grpc.sock"
+)
+
 func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
 	return []oci.SpecOpts{
 		// https://github.com/moby/buildkit/issues/429
@@ -122,3 +133,25 @@ func withDefaultProfile() oci.SpecOpts {
 		return err
 	}
 }
+
+func getTracingSocketMount(socket string) specs.Mount {
+	return specs.Mount{
+		Destination: tracingSocketPath,
+		Type:        "bind",
+		Source:      socket,
+		Options:     []string{"ro", "rbind"},
+	}
+}
+
+func getTracingSocket() string {
+	return fmt.Sprintf("unix://%s", tracingSocketPath)
+}
+
+func cgroupNamespaceSupported() bool {
+	cgroupNSOnce.Do(func() {
+		if _, err := os.Stat("/proc/self/ns/cgroup"); !os.IsNotExist(err) {
+			supportsCgroupNS = true
+		}
+	})
+	return supportsCgroupNS
+}

+ 24 - 0
vendor/github.com/moby/buildkit/executor/oci/spec_windows.go

@@ -4,12 +4,20 @@
 package oci
 
 import (
+	"fmt"
+	"path/filepath"
+
 	"github.com/containerd/containerd/oci"
 	"github.com/containerd/containerd/oci"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/solver/pb"
 	"github.com/moby/buildkit/solver/pb"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 )
 )
 
 
+const (
+	tracingSocketPath = "//./pipe/otel-grpc"
+)
+
 func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
 	return nil, nil
 }
@@ -43,3 +51,19 @@ func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) {
 	}
 	return nil, errors.New("no support for POSIXRlimit on Windows")
 }
+
+func getTracingSocketMount(socket string) specs.Mount {
+	return specs.Mount{
+		Destination: filepath.FromSlash(tracingSocketPath),
+		Source:      socket,
+		Options:     []string{"ro"},
+	}
+}
+
+func getTracingSocket() string {
+	return fmt.Sprintf("npipe://%s", filepath.ToSlash(tracingSocketPath))
+}
+
+func cgroupNamespaceSupported() bool {
+	return false
+}
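
Taken together, the platform files above give build processes a platform-specific OTLP endpoint: on Unix the env var points at unix:///dev/otel-grpc.sock, while on Windows getTracingSocket yields npipe:////./pipe/otel-grpc and the mount destination resolves to the \\.\pipe\otel-grpc named pipe (as read from the fmt.Sprintf/filepath calls above; not separately verified against a Windows run).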

+ 141 - 0
vendor/github.com/moby/buildkit/executor/resources/cpu.go

@@ -0,0 +1,141 @@
+package resources
+
+import (
+	"bufio"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"github.com/moby/buildkit/executor/resources/types"
+	"github.com/pkg/errors"
+)
+
+const (
+	cpuUsageUsec     = "usage_usec"
+	cpuUserUsec      = "user_usec"
+	cpuSystemUsec    = "system_usec"
+	cpuNrPeriods     = "nr_periods"
+	cpuNrThrottled   = "nr_throttled"
+	cpuThrottledUsec = "throttled_usec"
+)
+
+func getCgroupCPUStat(cgroupPath string) (*types.CPUStat, error) {
+	cpuStat := &types.CPUStat{}
+
+	// Read cpu.stat file
+	cpuStatFile, err := os.Open(filepath.Join(cgroupPath, "cpu.stat"))
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	defer cpuStatFile.Close()
+
+	scanner := bufio.NewScanner(cpuStatFile)
+	for scanner.Scan() {
+		line := scanner.Text()
+		fields := strings.Fields(line)
+
+		if len(fields) < 2 {
+			continue
+		}
+
+		key := fields[0]
+		value, err := strconv.ParseUint(fields[1], 10, 64)
+		if err != nil {
+			continue
+		}
+
+		switch key {
+		case cpuUsageUsec:
+			cpuStat.UsageNanos = uint64Ptr(value * 1000)
+		case cpuUserUsec:
+			cpuStat.UserNanos = uint64Ptr(value * 1000)
+		case cpuSystemUsec:
+			cpuStat.SystemNanos = uint64Ptr(value * 1000)
+		case cpuNrPeriods:
+			cpuStat.NrPeriods = new(uint32)
+			*cpuStat.NrPeriods = uint32(value)
+		case cpuNrThrottled:
+			cpuStat.NrThrottled = new(uint32)
+			*cpuStat.NrThrottled = uint32(value)
+		case cpuThrottledUsec:
+			cpuStat.ThrottledNanos = uint64Ptr(value * 1000)
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	// Read cpu.pressure file
+	pressure, err := parsePressureFile(filepath.Join(cgroupPath, "cpu.pressure"))
+	if err == nil {
+		cpuStat.Pressure = pressure
+	}
+
+	return cpuStat, nil
+}
+
+func parsePressureFile(filename string) (*types.Pressure, error) {
+	content, err := os.ReadFile(filename)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTSUP) { // pressure file requires CONFIG_PSI
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	lines := strings.Split(string(content), "\n")
+
+	pressure := &types.Pressure{}
+	for _, line := range lines {
+		// Skip empty lines
+		if len(strings.TrimSpace(line)) == 0 {
+			continue
+		}
+
+		fields := strings.Fields(line)
+		prefix := fields[0]
+		pressureValues := &types.PressureValues{}
+
+		for i := 1; i < len(fields); i++ {
+			keyValue := strings.Split(fields[i], "=")
+			key := keyValue[0]
+			valueStr := keyValue[1]
+
+			if key == "total" {
+				totalValue, err := strconv.ParseUint(valueStr, 10, 64)
+				if err != nil {
+					return nil, err
+				}
+				pressureValues.Total = &totalValue
+			} else {
+				value, err := strconv.ParseFloat(valueStr, 64)
+				if err != nil {
+					return nil, err
+				}
+
+				switch key {
+				case "avg10":
+					pressureValues.Avg10 = &value
+				case "avg60":
+					pressureValues.Avg60 = &value
+				case "avg300":
+					pressureValues.Avg300 = &value
+				}
+			}
+		}
+
+		switch prefix {
+		case "some":
+			pressure.Some = pressureValues
+		case "full":
+			pressure.Full = pressureValues
+		}
+	}
+
+	return pressure, nil
+}
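
parsePressureFile reads the kernel's PSI format, where each line looks like some avg10=0.12 avg60=0.08 avg300=0.02 total=123456 (total being cumulative stall time in microseconds). A standalone sketch of parsing one such line (the vendored helper itself is unexported):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        line := "some avg10=0.12 avg60=0.08 avg300=0.02 total=123456"
        fields := strings.Fields(line)
        fmt.Println("scope:", fields[0]) // "some" or "full"
        for _, kv := range fields[1:] {
            parts := strings.SplitN(kv, "=", 2)
            if parts[0] == "total" {
                total, _ := strconv.ParseUint(parts[1], 10, 64)
                fmt.Println("total stall (usec):", total)
                continue
            }
            avg, _ := strconv.ParseFloat(parts[1], 64)
            fmt.Println(parts[0], avg)
        }
    }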

+ 117 - 0
vendor/github.com/moby/buildkit/executor/resources/io.go

@@ -0,0 +1,117 @@
+package resources
+
+import (
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/moby/buildkit/executor/resources/types"
+	"github.com/pkg/errors"
+)
+
+const (
+	ioStatFile     = "io.stat"
+	ioPressureFile = "io.pressure"
+)
+
+const (
+	ioReadBytes    = "rbytes"
+	ioWriteBytes   = "wbytes"
+	ioDiscardBytes = "dbytes"
+	ioReadIOs      = "rios"
+	ioWriteIOs     = "wios"
+	ioDiscardIOs   = "dios"
+)
+
+func getCgroupIOStat(cgroupPath string) (*types.IOStat, error) {
+	ioStatPath := filepath.Join(cgroupPath, ioStatFile)
+	data, err := os.ReadFile(ioStatPath)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			return nil, nil
+		}
+		return nil, errors.Wrapf(err, "failed to read %s", ioStatPath)
+	}
+
+	ioStat := &types.IOStat{}
+	lines := strings.Split(string(data), "\n")
+	for _, line := range lines {
+		parts := strings.Fields(line)
+		if len(parts) < 2 {
+			continue
+		}
+
+		for _, part := range parts[1:] {
+			key, value := parseKeyValue(part)
+			if key == "" {
+				continue
+			}
+
+			switch key {
+			case ioReadBytes:
+				if ioStat.ReadBytes != nil {
+					*ioStat.ReadBytes += value
+				} else {
+					ioStat.ReadBytes = uint64Ptr(value)
+				}
+			case ioWriteBytes:
+				if ioStat.WriteBytes != nil {
+					*ioStat.WriteBytes += value
+				} else {
+					ioStat.WriteBytes = uint64Ptr(value)
+				}
+			case ioDiscardBytes:
+				if ioStat.DiscardBytes != nil {
+					*ioStat.DiscardBytes += value
+				} else {
+					ioStat.DiscardBytes = uint64Ptr(value)
+				}
+			case ioReadIOs:
+				if ioStat.ReadIOs != nil {
+					*ioStat.ReadIOs += value
+				} else {
+					ioStat.ReadIOs = uint64Ptr(value)
+				}
+			case ioWriteIOs:
+				if ioStat.WriteIOs != nil {
+					*ioStat.WriteIOs += value
+				} else {
+					ioStat.WriteIOs = uint64Ptr(value)
+				}
+			case ioDiscardIOs:
+				if ioStat.DiscardIOs != nil {
+					*ioStat.DiscardIOs += value
+				} else {
+					ioStat.DiscardIOs = uint64Ptr(value)
+				}
+			}
+		}
+	}
+
+	// Parse the pressure
+	pressure, err := parsePressureFile(filepath.Join(cgroupPath, ioPressureFile))
+	if err != nil {
+		return nil, err
+	}
+	ioStat.Pressure = pressure
+
+	return ioStat, nil
+}
+
+func parseKeyValue(kv string) (key string, value uint64) {
+	parts := strings.SplitN(kv, "=", 2)
+	if len(parts) != 2 {
+		return "", 0
+	}
+	key = parts[0]
+	value, err := strconv.ParseUint(parts[1], 10, 64)
+	if err != nil {
+		return "", 0
+	}
+	return key, value
+}
+
+func uint64Ptr(v uint64) *uint64 {
+	return &v
+}

+ 159 - 0
vendor/github.com/moby/buildkit/executor/resources/memory.go

@@ -0,0 +1,159 @@
+package resources
+
+import (
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/moby/buildkit/executor/resources/types"
+	"github.com/pkg/errors"
+)
+
+const (
+	memoryStatFile        = "memory.stat"
+	memoryPressureFile    = "memory.pressure"
+	memoryPeakFile        = "memory.peak"
+	memorySwapCurrentFile = "memory.swap.current"
+	memoryEventsFile      = "memory.events"
+)
+
+const (
+	memoryAnon          = "anon"
+	memoryFile          = "file"
+	memoryKernelStack   = "kernel_stack"
+	memoryPageTables    = "pagetables"
+	memorySock          = "sock"
+	memoryShmem         = "shmem"
+	memoryFileMapped    = "file_mapped"
+	memoryFileDirty     = "file_dirty"
+	memoryFileWriteback = "file_writeback"
+	memorySlab          = "slab"
+	memoryPgscan        = "pgscan"
+	memoryPgsteal       = "pgsteal"
+	memoryPgfault       = "pgfault"
+	memoryPgmajfault    = "pgmajfault"
+
+	memoryLow     = "low"
+	memoryHigh    = "high"
+	memoryMax     = "max"
+	memoryOom     = "oom"
+	memoryOomKill = "oom_kill"
+)
+
+func getCgroupMemoryStat(path string) (*types.MemoryStat, error) {
+	memoryStat := &types.MemoryStat{}
+
+	// Parse memory.stat
+	err := parseKeyValueFile(filepath.Join(path, memoryStatFile), func(key string, value uint64) {
+		switch key {
+		case memoryAnon:
+			memoryStat.Anon = &value
+		case memoryFile:
+			memoryStat.File = &value
+		case memoryKernelStack:
+			memoryStat.KernelStack = &value
+		case memoryPageTables:
+			memoryStat.PageTables = &value
+		case memorySock:
+			memoryStat.Sock = &value
+		case memoryShmem:
+			memoryStat.Shmem = &value
+		case memoryFileMapped:
+			memoryStat.FileMapped = &value
+		case memoryFileDirty:
+			memoryStat.FileDirty = &value
+		case memoryFileWriteback:
+			memoryStat.FileWriteback = &value
+		case memorySlab:
+			memoryStat.Slab = &value
+		case memoryPgscan:
+			memoryStat.Pgscan = &value
+		case memoryPgsteal:
+			memoryStat.Pgsteal = &value
+		case memoryPgfault:
+			memoryStat.Pgfault = &value
+		case memoryPgmajfault:
+			memoryStat.Pgmajfault = &value
+		}
+	})
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	pressure, err := parsePressureFile(filepath.Join(path, memoryPressureFile))
+	if err != nil {
+		return nil, err
+	}
+	if pressure != nil {
+		memoryStat.Pressure = pressure
+	}
+
+	err = parseKeyValueFile(filepath.Join(path, memoryEventsFile), func(key string, value uint64) {
+		switch key {
+		case memoryLow:
+			memoryStat.LowEvents = value
+		case memoryHigh:
+			memoryStat.HighEvents = value
+		case memoryMax:
+			memoryStat.MaxEvents = value
+		case memoryOom:
+			memoryStat.OomEvents = value
+		case memoryOomKill:
+			memoryStat.OomKillEvents = value
+		}
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	peak, err := parseSingleValueFile(filepath.Join(path, memoryPeakFile))
+	if err != nil {
+		if !errors.Is(err, os.ErrNotExist) {
+			return nil, err
+		}
+	} else {
+		memoryStat.Peak = &peak
+	}
+
+	swap, err := parseSingleValueFile(filepath.Join(path, memorySwapCurrentFile))
+	if err != nil {
+		if !errors.Is(err, os.ErrNotExist) {
+			return nil, err
+		}
+	} else {
+		memoryStat.SwapBytes = &swap
+	}
+
+	return memoryStat, nil
+}
+
+func parseKeyValueFile(filePath string, callback func(key string, value uint64)) error {
+	content, err := os.ReadFile(filePath)
+	if err != nil {
+		return errors.Wrapf(err, "failed to read %s", filePath)
+	}
+
+	lines := strings.Split(string(content), "\n")
+	for _, line := range lines {
+		if len(strings.TrimSpace(line)) == 0 {
+			continue
+		}
+
+		fields := strings.Fields(line)
+		key := fields[0]
+		valueStr := fields[1]
+		value, err := strconv.ParseUint(valueStr, 10, 64)
+		if err != nil {
+			return errors.Wrapf(err, "failed to parse value for %s", key)
+		}
+
+		callback(key, value)
+	}
+
+	return nil
+}

+ 287 - 0
vendor/github.com/moby/buildkit/executor/resources/monitor.go

@@ -0,0 +1,287 @@
+package resources
+
+import (
+	"bufio"
+	"context"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/moby/buildkit/executor/resources/types"
+	"github.com/moby/buildkit/util/network"
+	"github.com/prometheus/procfs"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	cgroupProcsFile       = "cgroup.procs"
+	cgroupControllersFile = "cgroup.controllers"
+	cgroupSubtreeFile     = "cgroup.subtree_control"
+	defaultMountpoint     = "/sys/fs/cgroup"
+	initGroup             = "init"
+)
+
+var initOnce sync.Once
+var isCgroupV2 bool
+
+type cgroupRecord struct {
+	once         sync.Once
+	ns           string
+	sampler      *Sub[*types.Sample]
+	closeSampler func() error
+	samples      []*types.Sample
+	err          error
+	done         chan struct{}
+	monitor      *Monitor
+	netSampler   NetworkSampler
+	startCPUStat *procfs.CPUStat
+	sysCPUStat   *types.SysCPUStat
+}
+
+func (r *cgroupRecord) Wait() error {
+	go r.close()
+	<-r.done
+	return r.err
+}
+
+func (r *cgroupRecord) Start() {
+	if stat, err := r.monitor.proc.Stat(); err == nil {
+		r.startCPUStat = &stat.CPUTotal
+	}
+	s := NewSampler(2*time.Second, 10, r.sample)
+	r.sampler = s.Record()
+	r.closeSampler = s.Close
+}
+
+func (r *cgroupRecord) Close() {
+	r.close()
+}
+
+func (r *cgroupRecord) CloseAsync(next func(context.Context) error) error {
+	go func() {
+		r.close()
+		next(context.TODO())
+	}()
+	return nil
+}
+
+func (r *cgroupRecord) close() {
+	r.once.Do(func() {
+		defer close(r.done)
+		go func() {
+			r.monitor.mu.Lock()
+			delete(r.monitor.records, r.ns)
+			r.monitor.mu.Unlock()
+		}()
+		if r.sampler == nil {
+			return
+		}
+		s, err := r.sampler.Close(true)
+		if err != nil {
+			r.err = err
+		} else {
+			r.samples = s
+		}
+		r.closeSampler()
+
+		if r.startCPUStat != nil {
+			stat, err := r.monitor.proc.Stat()
+			if err == nil {
+				cpu := &types.SysCPUStat{
+					User:      stat.CPUTotal.User - r.startCPUStat.User,
+					Nice:      stat.CPUTotal.Nice - r.startCPUStat.Nice,
+					System:    stat.CPUTotal.System - r.startCPUStat.System,
+					Idle:      stat.CPUTotal.Idle - r.startCPUStat.Idle,
+					Iowait:    stat.CPUTotal.Iowait - r.startCPUStat.Iowait,
+					IRQ:       stat.CPUTotal.IRQ - r.startCPUStat.IRQ,
+					SoftIRQ:   stat.CPUTotal.SoftIRQ - r.startCPUStat.SoftIRQ,
+					Steal:     stat.CPUTotal.Steal - r.startCPUStat.Steal,
+					Guest:     stat.CPUTotal.Guest - r.startCPUStat.Guest,
+					GuestNice: stat.CPUTotal.GuestNice - r.startCPUStat.GuestNice,
+				}
+				r.sysCPUStat = cpu
+			}
+		}
+	})
+}
+
+func (r *cgroupRecord) sample(tm time.Time) (*types.Sample, error) {
+	cpu, err := getCgroupCPUStat(filepath.Join(defaultMountpoint, r.ns))
+	if err != nil {
+		return nil, err
+	}
+	memory, err := getCgroupMemoryStat(filepath.Join(defaultMountpoint, r.ns))
+	if err != nil {
+		return nil, err
+	}
+	io, err := getCgroupIOStat(filepath.Join(defaultMountpoint, r.ns))
+	if err != nil {
+		return nil, err
+	}
+	pids, err := getCgroupPIDsStat(filepath.Join(defaultMountpoint, r.ns))
+	if err != nil {
+		return nil, err
+	}
+	sample := &types.Sample{
+		Timestamp_: tm,
+		CPUStat:    cpu,
+		MemoryStat: memory,
+		IOStat:     io,
+		PIDsStat:   pids,
+	}
+	if r.netSampler != nil {
+		net, err := r.netSampler.Sample()
+		if err != nil {
+			return nil, err
+		}
+		sample.NetStat = net
+	}
+	return sample, nil
+}
+
+func (r *cgroupRecord) Samples() (*types.Samples, error) {
+	<-r.done
+	if r.err != nil {
+		return nil, r.err
+	}
+	return &types.Samples{
+		Samples:    r.samples,
+		SysCPUStat: r.sysCPUStat,
+	}, nil
+}
+
+type nopRecord struct {
+}
+
+func (r *nopRecord) Wait() error {
+	return nil
+}
+
+func (r *nopRecord) Samples() (*types.Samples, error) {
+	return nil, nil
+}
+
+func (r *nopRecord) Close() {
+}
+
+func (r *nopRecord) CloseAsync(next func(context.Context) error) error {
+	return next(context.TODO())
+}
+
+func (r *nopRecord) Start() {
+}
+
+type Monitor struct {
+	mu      sync.Mutex
+	closed  chan struct{}
+	records map[string]*cgroupRecord
+	proc    procfs.FS
+}
+
+type NetworkSampler interface {
+	Sample() (*network.Sample, error)
+}
+
+type RecordOpt struct {
+	NetworkSampler NetworkSampler
+}
+
+func (m *Monitor) RecordNamespace(ns string, opt RecordOpt) (types.Recorder, error) {
+	isClosed := false
+	select {
+	case <-m.closed:
+		isClosed = true
+	default:
+	}
+	if !isCgroupV2 || isClosed {
+		return &nopRecord{}, nil
+	}
+	r := &cgroupRecord{
+		ns:         ns,
+		done:       make(chan struct{}),
+		monitor:    m,
+		netSampler: opt.NetworkSampler,
+	}
+	m.mu.Lock()
+	m.records[ns] = r
+	m.mu.Unlock()
+	return r, nil
+}
+
+func (m *Monitor) Close() error {
+	close(m.closed)
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	for _, r := range m.records {
+		r.close()
+	}
+	return nil
+}
+
+func NewMonitor() (*Monitor, error) {
+	initOnce.Do(func() {
+		isCgroupV2 = isCgroup2()
+		if !isCgroupV2 {
+			return
+		}
+		if err := prepareCgroupControllers(); err != nil {
+			logrus.Warnf("failed to prepare cgroup controllers: %+v", err)
+		}
+	})
+
+	fs, err := procfs.NewDefaultFS()
+	if err != nil {
+		return nil, err
+	}
+
+	return &Monitor{
+		closed:  make(chan struct{}),
+		records: make(map[string]*cgroupRecord),
+		proc:    fs,
+	}, nil
+}
+
+func prepareCgroupControllers() error {
+	v, ok := os.LookupEnv("BUILDKIT_SETUP_CGROUPV2_ROOT")
+	if !ok {
+		return nil
+	}
+	if b, _ := strconv.ParseBool(v); !b {
+		return nil
+	}
+	// move current process to init cgroup
+	if err := os.MkdirAll(filepath.Join(defaultMountpoint, initGroup), 0755); err != nil {
+		return err
+	}
+	f, err := os.OpenFile(filepath.Join(defaultMountpoint, cgroupProcsFile), os.O_RDONLY, 0)
+	if err != nil {
+		return err
+	}
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		if err := os.WriteFile(filepath.Join(defaultMountpoint, initGroup, cgroupProcsFile), s.Bytes(), 0); err != nil {
+			return err
+		}
+	}
+	if err := f.Close(); err != nil {
+		return err
+	}
+	dt, err := os.ReadFile(filepath.Join(defaultMountpoint, cgroupControllersFile))
+	if err != nil {
+		return err
+	}
+	for _, c := range strings.Split(string(dt), " ") {
+		if c == "" {
+			continue
+		}
+		if err := os.WriteFile(filepath.Join(defaultMountpoint, cgroupSubtreeFile), []byte("+"+c), 0); err != nil {
+			// ignore error
+			logrus.Warnf("failed to enable cgroup controller %q: %+v", c, err)
+		}
+	}
+	return nil
+}
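
A rough sketch of driving the monitor above from a worker (assumes a cgroup v2 host, a cgroup already created under /sys/fs/cgroup for the hypothetical namespace name, and that the Recorder interface exposes Start/Close/Samples as the cgroup implementation above does):

    package main

    import (
        "log"

        "github.com/moby/buildkit/executor/resources"
    )

    func main() {
        monitor, err := resources.NewMonitor()
        if err != nil {
            log.Fatal(err)
        }
        rec, err := monitor.RecordNamespace("buildkit/abc", resources.RecordOpt{}) // hypothetical cgroup name
        if err != nil {
            log.Fatal(err)
        }
        rec.Start() // begin periodic cpu/memory/io/pids sampling
        // ... run the workload being measured ...
        rec.Close()                   // stop sampling
        samples, err := rec.Samples() // collected samples plus a whole-system CPU delta
        log.Println(samples, err)
    }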

+ 15 - 0
vendor/github.com/moby/buildkit/executor/resources/monitor_linux.go

@@ -0,0 +1,15 @@
+//go:build linux
+// +build linux
+
+package resources
+
+import "golang.org/x/sys/unix"
+
+func isCgroup2() bool {
+	var st unix.Statfs_t
+	err := unix.Statfs(defaultMountpoint, &st)
+	if err != nil {
+		return false
+	}
+	return st.Type == unix.CGROUP2_SUPER_MAGIC
+}

+ 8 - 0
vendor/github.com/moby/buildkit/executor/resources/monitor_nolinux.go

@@ -0,0 +1,8 @@
+//go:build !linux
+// +build !linux
+
+package resources
+
+func isCgroup2() bool {
+	return false
+}

+ 45 - 0
vendor/github.com/moby/buildkit/executor/resources/pids.go

@@ -0,0 +1,45 @@
+package resources
+
+import (
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	"github.com/moby/buildkit/executor/resources/types"
+	"github.com/pkg/errors"
+)
+
+const (
+	pidsCurrentFile = "pids.current"
+)
+
+func getCgroupPIDsStat(path string) (*types.PIDsStat, error) {
+	pidsStat := &types.PIDsStat{}
+
+	v, err := parseSingleValueFile(filepath.Join(path, pidsCurrentFile))
+	if err != nil {
+		if !errors.Is(err, os.ErrNotExist) {
+			return nil, err
+		}
+	} else {
+		pidsStat.Current = &v
+	}
+
+	return pidsStat, nil
+}
+
+func parseSingleValueFile(filePath string) (uint64, error) {
+	content, err := os.ReadFile(filePath)
+	if err != nil {
+		return 0, errors.Wrapf(err, "failed to read %s", filePath)
+	}
+
+	valueStr := strings.TrimSpace(string(content))
+	value, err := strconv.ParseUint(valueStr, 10, 64)
+	if err != nil {
+		return 0, errors.Wrapf(err, "failed to parse value: %s", valueStr)
+	}
+
+	return value, nil
+}

+ 139 - 0
vendor/github.com/moby/buildkit/executor/resources/sampler.go

@@ -0,0 +1,139 @@
+package resources
+
+import (
+	"sync"
+	"time"
+)
+
+type WithTimestamp interface {
+	Timestamp() time.Time
+}
+
+type Sampler[T WithTimestamp] struct {
+	mu          sync.Mutex
+	minInterval time.Duration
+	maxSamples  int
+	callback    func(ts time.Time) (T, error)
+	doneOnce    sync.Once
+	done        chan struct{}
+	running     bool
+	subs        map[*Sub[T]]struct{}
+}
+
+type Sub[T WithTimestamp] struct {
+	sampler  *Sampler[T]
+	interval time.Duration
+	first    time.Time
+	last     time.Time
+	samples  []T
+	err      error
+}
+
+func (s *Sub[T]) Close(captureLast bool) ([]T, error) {
+	s.sampler.mu.Lock()
+	delete(s.sampler.subs, s)
+
+	if s.err != nil {
+		s.sampler.mu.Unlock()
+		return nil, s.err
+	}
+	current := s.first
+	out := make([]T, 0, len(s.samples)+1)
+	for i, v := range s.samples {
+		ts := v.Timestamp()
+		if i == 0 || ts.Sub(current) >= s.interval {
+			out = append(out, v)
+			current = ts
+		}
+	}
+	s.sampler.mu.Unlock()
+
+	if captureLast {
+		v, err := s.sampler.callback(time.Now())
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, v)
+	}
+
+	return out, nil
+}
+
+func NewSampler[T WithTimestamp](minInterval time.Duration, maxSamples int, cb func(time.Time) (T, error)) *Sampler[T] {
+	s := &Sampler[T]{
+		minInterval: minInterval,
+		maxSamples:  maxSamples,
+		callback:    cb,
+		done:        make(chan struct{}),
+		subs:        make(map[*Sub[T]]struct{}),
+	}
+	return s
+}
+
+func (s *Sampler[T]) Record() *Sub[T] {
+	ss := &Sub[T]{
+		interval: s.minInterval,
+		first:    time.Now(),
+		sampler:  s,
+	}
+	s.mu.Lock()
+	s.subs[ss] = struct{}{}
+	if !s.running {
+		s.running = true
+		go s.run()
+	}
+	s.mu.Unlock()
+	return ss
+}
+
+func (s *Sampler[T]) run() {
+	ticker := time.NewTimer(s.minInterval)
+	for {
+		select {
+		case <-s.done:
+			ticker.Stop()
+			return
+		case <-ticker.C:
+			tm := time.Now()
+			s.mu.Lock()
+			active := make([]*Sub[T], 0, len(s.subs))
+			for ss := range s.subs {
+				if tm.Sub(ss.last) < ss.interval {
+					continue
+				}
+				ss.last = tm
+				active = append(active, ss)
+			}
+			s.mu.Unlock()
+			ticker = time.NewTimer(s.minInterval)
+			if len(active) == 0 {
+				continue
+			}
+			value, err := s.callback(tm)
+			s.mu.Lock()
+			for _, ss := range active {
+				if _, found := s.subs[ss]; !found {
+					continue // skip if Close() was called while the lock was released
+				}
+				if err != nil {
+					ss.err = err
+				} else {
+					ss.samples = append(ss.samples, value)
+					ss.err = nil
+				}
+				dur := ss.last.Sub(ss.first)
+				if time.Duration(ss.interval)*time.Duration(s.maxSamples) <= dur {
+					ss.interval *= 2
+				}
+			}
+			s.mu.Unlock()
+		}
+	}
+}
+
+func (s *Sampler[T]) Close() error {
+	s.doneOnce.Do(func() {
+		close(s.done)
+	})
+	return nil
+}
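
The generic Sampler multiplexes a single polling goroutine across subscribers and doubles a subscriber's effective interval once it has accumulated maxSamples worth of time, so long-running records decay to coarser resolution. A minimal usage sketch with a custom sample type:

    package main

    import (
        "fmt"
        "time"

        "github.com/moby/buildkit/executor/resources"
    )

    type tick struct{ ts time.Time }

    func (t *tick) Timestamp() time.Time { return t.ts } // satisfies WithTimestamp

    func main() {
        s := resources.NewSampler(100*time.Millisecond, 10, func(tm time.Time) (*tick, error) {
            return &tick{ts: tm}, nil // invoked once per interval, shared by all subscribers
        })
        defer s.Close()

        sub := s.Record() // start collecting for this subscriber
        time.Sleep(350 * time.Millisecond)
        samples, err := sub.Close(true) // true: capture one final sample on close
        fmt.Println(len(samples), err)
    }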

+ 9 - 0
vendor/github.com/moby/buildkit/executor/resources/sys.go

@@ -0,0 +1,9 @@
+package resources
+
+import "github.com/moby/buildkit/executor/resources/types"
+
+type SysSampler = Sub[*types.SysSample]
+
+func NewSysSampler() (*Sampler[*types.SysSample], error) {
+	return newSysSampler()
+}

+ 93 - 0
vendor/github.com/moby/buildkit/executor/resources/sys_linux.go

@@ -0,0 +1,93 @@
+package resources
+
+import (
+	"os"
+	"time"
+
+	"github.com/moby/buildkit/executor/resources/types"
+	"github.com/prometheus/procfs"
+)
+
+func newSysSampler() (*Sampler[*types.SysSample], error) {
+	pfs, err := procfs.NewDefaultFS()
+	if err != nil {
+		return nil, err
+	}
+
+	return NewSampler(2*time.Second, 20, func(tm time.Time) (*types.SysSample, error) {
+		return sampleSys(pfs, tm)
+	}), nil
+}
+
+func sampleSys(proc procfs.FS, tm time.Time) (*types.SysSample, error) {
+	stat, err := proc.Stat()
+	if err != nil {
+		return nil, err
+	}
+
+	s := &types.SysSample{
+		Timestamp_: tm,
+	}
+
+	s.CPUStat = &types.SysCPUStat{
+		User:      stat.CPUTotal.User,
+		Nice:      stat.CPUTotal.Nice,
+		System:    stat.CPUTotal.System,
+		Idle:      stat.CPUTotal.Idle,
+		Iowait:    stat.CPUTotal.Iowait,
+		IRQ:       stat.CPUTotal.IRQ,
+		SoftIRQ:   stat.CPUTotal.SoftIRQ,
+		Steal:     stat.CPUTotal.Steal,
+		Guest:     stat.CPUTotal.Guest,
+		GuestNice: stat.CPUTotal.GuestNice,
+	}
+
+	s.ProcStat = &types.ProcStat{
+		ContextSwitches:  stat.ContextSwitches,
+		ProcessCreated:   stat.ProcessCreated,
+		ProcessesRunning: stat.ProcessesRunning,
+	}
+
+	mem, err := proc.Meminfo()
+	if err != nil {
+		return nil, err
+	}
+
+	s.MemoryStat = &types.SysMemoryStat{
+		Total:     mem.MemTotal,
+		Free:      mem.MemFree,
+		Buffers:   mem.Buffers,
+		Cached:    mem.Cached,
+		Active:    mem.Active,
+		Inactive:  mem.Inactive,
+		Swap:      mem.SwapTotal,
+		Available: mem.MemAvailable,
+		Dirty:     mem.Dirty,
+		Writeback: mem.Writeback,
+		Slab:      mem.Slab,
+	}
+
+	if _, err := os.Lstat("/proc/pressure"); err != nil {
+		return s, nil
+	}
+
+	cp, err := parsePressureFile("/proc/pressure/cpu")
+	if err != nil {
+		return nil, err
+	}
+	s.CPUPressure = cp
+
+	mp, err := parsePressureFile("/proc/pressure/memory")
+	if err != nil {
+		return nil, err
+	}
+	s.MemoryPressure = mp
+
+	ip, err := parsePressureFile("/proc/pressure/io")
+	if err != nil {
+		return nil, err
+	}
+	s.IOPressure = ip
+
+	return s, nil
+}

+ 9 - 0
vendor/github.com/moby/buildkit/executor/resources/sys_nolinux.go

@@ -0,0 +1,9 @@
+//go:build !linux
+
+package resources
+
+import "github.com/moby/buildkit/executor/resources/types"
+
+func newSysSampler() (*Sampler[*types.SysSample], error) {
+	return nil, nil
+}

+ 72 - 0
vendor/github.com/moby/buildkit/executor/resources/types/systypes.go

@@ -0,0 +1,72 @@
+package types
+
+import (
+	"encoding/json"
+	"math"
+	"time"
+)
+
+type SysCPUStat struct {
+	User      float64 `json:"user"`
+	Nice      float64 `json:"nice"`
+	System    float64 `json:"system"`
+	Idle      float64 `json:"idle"`
+	Iowait    float64 `json:"iowait"`
+	IRQ       float64 `json:"irq"`
+	SoftIRQ   float64 `json:"softirq"`
+	Steal     float64 `json:"steal"`
+	Guest     float64 `json:"guest"`
+	GuestNice float64 `json:"guestNice"`
+}
+
+type sysCPUStatAlias SysCPUStat // avoid recursion of MarshalJSON
+
+func (s SysCPUStat) MarshalJSON() ([]byte, error) {
+	return json.Marshal(sysCPUStatAlias{
+		User:      math.Round(s.User*1000) / 1000,
+		Nice:      math.Round(s.Nice*1000) / 1000,
+		System:    math.Round(s.System*1000) / 1000,
+		Idle:      math.Round(s.Idle*1000) / 1000,
+		Iowait:    math.Round(s.Iowait*1000) / 1000,
+		IRQ:       math.Round(s.IRQ*1000) / 1000,
+		SoftIRQ:   math.Round(s.SoftIRQ*1000) / 1000,
+		Steal:     math.Round(s.Steal*1000) / 1000,
+		Guest:     math.Round(s.Guest*1000) / 1000,
+		GuestNice: math.Round(s.GuestNice*1000) / 1000,
+	})
+}
+
+type ProcStat struct {
+	ContextSwitches  uint64 `json:"contextSwitches"`
+	ProcessCreated   uint64 `json:"processCreated"`
+	ProcessesRunning uint64 `json:"processesRunning"`
+}
+
+type SysMemoryStat struct {
+	Total     *uint64 `json:"total"`
+	Free      *uint64 `json:"free"`
+	Available *uint64 `json:"available"`
+	Buffers   *uint64 `json:"buffers"`
+	Cached    *uint64 `json:"cached"`
+	Active    *uint64 `json:"active"`
+	Inactive  *uint64 `json:"inactive"`
+	Swap      *uint64 `json:"swap"`
+	Dirty     *uint64 `json:"dirty"`
+	Writeback *uint64 `json:"writeback"`
+	Slab      *uint64 `json:"slab"`
+}
+
+type SysSample struct {
+	//nolint
+	Timestamp_     time.Time      `json:"timestamp"`
+	CPUStat        *SysCPUStat    `json:"cpuStat,omitempty"`
+	ProcStat       *ProcStat      `json:"procStat,omitempty"`
+	MemoryStat     *SysMemoryStat `json:"memoryStat,omitempty"`
+	CPUPressure    *Pressure      `json:"cpuPressure,omitempty"`
+	MemoryPressure *Pressure      `json:"memoryPressure,omitempty"`
+	IOPressure     *Pressure      `json:"ioPressure,omitempty"`
+}
+
+func (s *SysSample) Timestamp() time.Time {
+	return s.Timestamp_
+}

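The custom MarshalJSON above exists only to round CPU seconds to three decimal places; the alias type prevents infinite recursion. A quick sketch of the effect (hypothetical program, not part of the diff):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/moby/buildkit/executor/resources/types"
)

func main() {
	st := types.SysCPUStat{User: 1.23456, System: 0.0004}
	b, err := json.Marshal(st)
	if err != nil {
		panic(err)
	}
	// "user" is rounded to 1.235 and "system" collapses to 0.
	fmt.Println(string(b))
}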
+ 104 - 0
vendor/github.com/moby/buildkit/executor/resources/types/types.go

@@ -0,0 +1,104 @@
+package types
+
+import (
+	"context"
+	"time"
+
+	"github.com/moby/buildkit/util/network"
+)
+
+type Recorder interface {
+	Start()
+	Close()
+	CloseAsync(func(context.Context) error) error
+	Wait() error
+	Samples() (*Samples, error)
+}
+
+type Samples struct {
+	Samples    []*Sample   `json:"samples,omitempty"`
+	SysCPUStat *SysCPUStat `json:"sysCPUStat,omitempty"`
+}
+
+// Sample represents a wrapper for sampled data of cgroupv2 controllers
+type Sample struct {
+	//nolint
+	Timestamp_ time.Time       `json:"timestamp"`
+	CPUStat    *CPUStat        `json:"cpuStat,omitempty"`
+	MemoryStat *MemoryStat     `json:"memoryStat,omitempty"`
+	IOStat     *IOStat         `json:"ioStat,omitempty"`
+	PIDsStat   *PIDsStat       `json:"pidsStat,omitempty"`
+	NetStat    *network.Sample `json:"netStat,omitempty"`
+}
+
+func (s *Sample) Timestamp() time.Time {
+	return s.Timestamp_
+}
+
+// CPUStat represents the sampling state of the cgroupv2 CPU controller
+type CPUStat struct {
+	UsageNanos     *uint64   `json:"usageNanos,omitempty"`
+	UserNanos      *uint64   `json:"userNanos,omitempty"`
+	SystemNanos    *uint64   `json:"systemNanos,omitempty"`
+	NrPeriods      *uint32   `json:"nrPeriods,omitempty"`
+	NrThrottled    *uint32   `json:"nrThrottled,omitempty"`
+	ThrottledNanos *uint64   `json:"throttledNanos,omitempty"`
+	Pressure       *Pressure `json:"pressure,omitempty"`
+}
+
+// MemoryStat represents the sampling state of the cgroupv2 memory controller
+type MemoryStat struct {
+	SwapBytes     *uint64   `json:"swapBytes,omitempty"`
+	Anon          *uint64   `json:"anon,omitempty"`
+	File          *uint64   `json:"file,omitempty"`
+	Kernel        *uint64   `json:"kernel,omitempty"`
+	KernelStack   *uint64   `json:"kernelStack,omitempty"`
+	PageTables    *uint64   `json:"pageTables,omitempty"`
+	Sock          *uint64   `json:"sock,omitempty"`
+	Vmalloc       *uint64   `json:"vmalloc,omitempty"`
+	Shmem         *uint64   `json:"shmem,omitempty"`
+	FileMapped    *uint64   `json:"fileMapped,omitempty"`
+	FileDirty     *uint64   `json:"fileDirty,omitempty"`
+	FileWriteback *uint64   `json:"fileWriteback,omitempty"`
+	Slab          *uint64   `json:"slab,omitempty"`
+	Pgscan        *uint64   `json:"pgscan,omitempty"`
+	Pgsteal       *uint64   `json:"pgsteal,omitempty"`
+	Pgfault       *uint64   `json:"pgfault,omitempty"`
+	Pgmajfault    *uint64   `json:"pgmajfault,omitempty"`
+	Peak          *uint64   `json:"peak,omitempty"`
+	LowEvents     uint64    `json:"lowEvents,omitempty"`
+	HighEvents    uint64    `json:"highEvents,omitempty"`
+	MaxEvents     uint64    `json:"maxEvents,omitempty"`
+	OomEvents     uint64    `json:"oomEvents,omitempty"`
+	OomKillEvents uint64    `json:"oomKillEvents,omitempty"`
+	Pressure      *Pressure `json:"pressure,omitempty"`
+}
+
+// IOStat represents the sampling state of the cgroupv2 IO controller
+type IOStat struct {
+	ReadBytes    *uint64   `json:"readBytes,omitempty"`
+	WriteBytes   *uint64   `json:"writeBytes,omitempty"`
+	DiscardBytes *uint64   `json:"discardBytes,omitempty"`
+	ReadIOs      *uint64   `json:"readIOs,omitempty"`
+	WriteIOs     *uint64   `json:"writeIOs,omitempty"`
+	DiscardIOs   *uint64   `json:"discardIOs,omitempty"`
+	Pressure     *Pressure `json:"pressure,omitempty"`
+}
+
+// PIDsStat represents the sampling state of the cgroupv2 PIDs controller
+type PIDsStat struct {
+	Current *uint64 `json:"current,omitempty"`
+}
+
+// Pressure represents the sampling state of pressure files
+type Pressure struct {
+	Some *PressureValues `json:"some"`
+	Full *PressureValues `json:"full"`
+}
+
+type PressureValues struct {
+	Avg10  *float64 `json:"avg10"`
+	Avg60  *float64 `json:"avg60"`
+	Avg300 *float64 `json:"avg300"`
+	Total  *uint64  `json:"total"`
+}

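The Pressure and PressureValues types mirror the kernel's PSI format: each /proc/pressure file carries "some" and "full" lines of the form `some avg10=0.00 avg60=0.00 avg300=0.00 total=0`. The parsePressureFile helper called from sys_linux.go above is not part of this diff; a sketch of what such a parser looks like under that format (a hypothetical stand-in, standard library only):

package types

import (
	"os"
	"strconv"
	"strings"
)

// parsePSI is a hypothetical stand-in for the package's parsePressureFile.
func parsePSI(path string) (*Pressure, error) {
	dt, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	p := &Pressure{}
	for _, line := range strings.Split(strings.TrimSpace(string(dt)), "\n") {
		fields := strings.Fields(line) // e.g. ["some", "avg10=0.00", "avg60=0.00", "avg300=0.00", "total=0"]
		if len(fields) != 5 {
			continue
		}
		v := &PressureValues{}
		for _, kv := range fields[1:] {
			key, val, ok := strings.Cut(kv, "=")
			if !ok {
				continue
			}
			switch key {
			case "avg10", "avg60", "avg300":
				f, err := strconv.ParseFloat(val, 64)
				if err != nil {
					return nil, err
				}
				switch key {
				case "avg10":
					v.Avg10 = &f
				case "avg60":
					v.Avg60 = &f
				case "avg300":
					v.Avg300 = &f
				}
			case "total":
				t, err := strconv.ParseUint(val, 10, 64)
				if err != nil {
					return nil, err
				}
				v.Total = &t
			}
		}
		switch fields[0] {
		case "some":
			p.Some = v
		case "full":
			p.Full = v
		}
	}
	return p, nil
}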
+ 277 - 86
vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go

@@ -7,6 +7,7 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
+	"strconv"
 	"sync"
 	"syscall"
 	"time"
@@ -22,6 +23,8 @@ import (
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/moby/buildkit/executor"
 	"github.com/moby/buildkit/executor/oci"
+	"github.com/moby/buildkit/executor/resources"
+	resourcestypes "github.com/moby/buildkit/executor/resources/types"
 	gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/solver/pb"
@@ -50,6 +53,7 @@ type Opt struct {
 	ApparmorProfile string
 	SELinux         bool
 	TracingSocket   string
+	ResourceMonitor *resources.Monitor
 }
 
 var defaultCommandCandidates = []string{"buildkit-runc", "runc"}
@@ -70,6 +74,7 @@ type runcExecutor struct {
 	apparmorProfile  string
 	selinux          bool
 	tracingSocket    string
+	resmon           *resources.Monitor
 }
 
 func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) {
@@ -92,7 +97,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex
 
 	root := opt.Root
 
-	if err := os.MkdirAll(root, 0711); err != nil {
+	if err := os.MkdirAll(root, 0o711); err != nil {
 		return nil, errors.Wrapf(err, "failed to create %s", root)
 	}
 
@@ -135,11 +140,12 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex
 		apparmorProfile:  opt.ApparmorProfile,
 		selinux:          opt.SELinux,
 		tracingSocket:    opt.TracingSocket,
+		resmon:           opt.ResourceMonitor,
 	}
 	return w, nil
 }
 
-func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) {
+func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) {
 	meta := process.Meta
 
 	startedOnce := sync.Once{}
@@ -162,13 +168,18 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 
 	provider, ok := w.networkProviders[meta.NetMode]
 	if !ok {
-		return errors.Errorf("unknown network mode %s", meta.NetMode)
+		return nil, errors.Errorf("unknown network mode %s", meta.NetMode)
 	}
 	namespace, err := provider.New(ctx, meta.Hostname)
 	if err != nil {
-		return err
+		return nil, err
 	}
-	defer namespace.Close()
+	doReleaseNetwork := true
+	defer func() {
+		if doReleaseNetwork {
+			namespace.Close()
+		}
+	}()
 
 	if meta.NetMode == pb.NetMode_HOST {
 		bklog.G(ctx).Info("enabling HostNetworking")
@@ -176,12 +187,12 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 
 	resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, w.idmap, meta.Hostname)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	if clean != nil {
 		defer clean()
@@ -189,12 +200,12 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 
 	mountable, err := root.Src.Mount(ctx, false)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	rootMount, release, err := mountable.Mount()
 	if err != nil {
-		return err
+		return nil, err
 	}
 	if release != nil {
 		defer release()
@@ -205,8 +216,8 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 	}
 	bundle := filepath.Join(w.root, id)
 
-	if err := os.Mkdir(bundle, 0711); err != nil {
-		return err
+	if err := os.Mkdir(bundle, 0o711); err != nil {
+		return nil, err
 	}
 	defer os.RemoveAll(bundle)
 
@@ -216,24 +227,24 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 	}
 
 	rootFSPath := filepath.Join(bundle, "rootfs")
-	if err := idtools.MkdirAllAndChown(rootFSPath, 0700, identity); err != nil {
-		return err
+	if err := idtools.MkdirAllAndChown(rootFSPath, 0o700, identity); err != nil {
+		return nil, err
 	}
 	if err := mount.All(rootMount, rootFSPath); err != nil {
-		return err
+		return nil, err
 	}
 	defer mount.Unmount(rootFSPath, 0)
 
-	defer executor.MountStubsCleaner(rootFSPath, mounts, meta.RemoveMountStubsRecursive)()
+	defer executor.MountStubsCleaner(ctx, rootFSPath, mounts, meta.RemoveMountStubsRecursive)()
 
 	uid, gid, sgids, err := oci.GetUser(rootFSPath, meta.User)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	f, err := os.Create(filepath.Join(bundle, "config.json"))
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer f.Close()
 
@@ -250,13 +261,13 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 	if w.idmap != nil {
 		identity, err = w.idmap.ToHost(identity)
 		if err != nil {
-			return err
+			return nil, err
 		}
 	}
 
 	spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, w.processMode, w.idmap, w.apparmorProfile, w.selinux, w.tracingSocket, opts...)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer cleanup()
 
@@ -267,11 +278,11 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 
 	newp, err := fs.RootPath(rootFSPath, meta.Cwd)
 	if err != nil {
-		return errors.Wrapf(err, "working dir %s points to invalid target", newp)
+		return nil, errors.Wrapf(err, "working dir %s points to invalid target", newp)
 	}
 	if _, err := os.Stat(newp); err != nil {
-		if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil {
-			return errors.Wrapf(err, "failed to create working directory %s", newp)
+		if err := idtools.MkdirAllAndChown(newp, 0o755, identity); err != nil {
+			return nil, errors.Wrapf(err, "failed to create working directory %s", newp)
 		}
 	}
 
@@ -279,59 +290,63 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
 	spec.Process.OOMScoreAdj = w.oomScoreAdj
 	if w.rootless {
 		if err := rootlessspecconv.ToRootless(spec); err != nil {
-			return err
+			return nil, err
 		}
 	}
 
 	if err := json.NewEncoder(f).Encode(spec); err != nil {
-		return err
+		return nil, err
 	}
 
-	// runCtx/killCtx is used for extra check in case the kill command blocks
-	runCtx, cancelRun := context.WithCancel(context.Background())
-	defer cancelRun()
+	bklog.G(ctx).Debugf("> creating %s %v", id, meta.Args)
 
-	ended := make(chan struct{})
-	go func() {
-		for {
-			select {
-			case <-ctx.Done():
-				killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second)
-				if err := w.runc.Kill(killCtx, id, int(syscall.SIGKILL), nil); err != nil {
-					bklog.G(ctx).Errorf("failed to kill runc %s: %+v", id, err)
-					select {
-					case <-killCtx.Done():
-						timeout()
-						cancelRun()
-						return
-					default:
-					}
-				}
-				timeout()
-				select {
-				case <-time.After(50 * time.Millisecond):
-				case <-ended:
-					return
-				}
-			case <-ended:
-				return
-			}
+	cgroupPath := spec.Linux.CgroupsPath
+	if cgroupPath != "" {
+		rec, err = w.resmon.RecordNamespace(cgroupPath, resources.RecordOpt{
+			NetworkSampler: namespace,
+		})
+		if err != nil {
+			return nil, err
 		}
-	}()
-
-	bklog.G(ctx).Debugf("> creating %s %v", id, meta.Args)
+	}
 
 	trace.SpanFromContext(ctx).AddEvent("Container created")
-	err = w.run(runCtx, id, bundle, process, func() {
+	err = w.run(ctx, id, bundle, process, func() {
 		startedOnce.Do(func() {
 			trace.SpanFromContext(ctx).AddEvent("Container started")
 			if started != nil {
 				close(started)
 			}
+			if rec != nil {
+				rec.Start()
+			}
 		})
-	})
-	close(ended)
-	return exitError(ctx, err)
+	}, true)
+
+	releaseContainer := func(ctx context.Context) error {
+		err := w.runc.Delete(ctx, id, &runc.DeleteOpts{})
+		err1 := namespace.Close()
+		if err == nil {
+			err = err1
+		}
+		return err
+	}
+	doReleaseNetwork = false
+
+	err = exitError(ctx, err)
+	if err != nil {
+		if rec != nil {
+			rec.Close()
+		}
+		releaseContainer(context.TODO())
+		return nil, err
+	}
+
+	if rec == nil {
+		return nil, releaseContainer(context.TODO())
+	}
+
+	return rec, rec.CloseAsync(releaseContainer)
 }
 
 func exitError(ctx context.Context, err error) error {
@@ -341,7 +356,7 @@ func exitError(ctx context.Context, err error) error {
 			Err:      err,
 		}
 		var runcExitError *runc.ExitError
-		if errors.As(err, &runcExitError) {
+		if errors.As(err, &runcExitError) && runcExitError.Status >= 0 {
 			exitErr = &gatewayapi.ExitError{
 				ExitCode: uint32(runcExitError.Status),
 			}
@@ -462,23 +477,190 @@ func (s *forwardIO) Stderr() io.ReadCloser {
 	return nil
 }
 
-// startingProcess is to track the os process so we can send signals to it.
-type startingProcess struct {
-	Process *os.Process
-	ready   chan struct{}
+// newRuncProcKiller returns an abstraction for sending SIGKILL to the
+// process inside the container initiated from `runc run`.
+func newRunProcKiller(runC *runc.Runc, id string) procKiller {
+	return procKiller{runC: runC, id: id}
 }
 
-// Release will free resources with a startingProcess.
-func (p *startingProcess) Release() {
-	if p.Process != nil {
-		p.Process.Release()
+// newExecProcKiller returns an abstraction for sending SIGKILL to the
+// process inside the container initiated from `runc exec`.
+func newExecProcKiller(runC *runc.Runc, id string) (procKiller, error) {
+	// for `runc exec` we need to create a pidfile and read it later to kill
+	// the process
+	tdir, err := os.MkdirTemp("", "runc")
+	if err != nil {
+		return procKiller{}, errors.Wrap(err, "failed to create directory for runc pidfile")
+	}
+
+	return procKiller{
+		runC:    runC,
+		id:      id,
+		pidfile: filepath.Join(tdir, "pidfile"),
+		cleanup: func() {
+			os.RemoveAll(tdir)
+		},
+	}, nil
+}
+
+type procKiller struct {
+	runC    *runc.Runc
+	id      string
+	pidfile string
+	cleanup func()
+}
+
+// Cleanup will delete any tmp files created for the pidfile allocation
+// if this killer was for a `runc exec` process.
+func (k procKiller) Cleanup() {
+	if k.cleanup != nil {
+		k.cleanup()
+	}
+}
+
+// Kill will send SIGKILL to the process running inside the container.
+// If the process was created by `runc run` then we will use `runc kill`,
+// otherwise for `runc exec` we will read the pid from a pidfile and then
+// send the signal directly to that process.
+func (k procKiller) Kill(ctx context.Context) (err error) {
+	bklog.G(ctx).Debugf("sending sigkill to process in container %s", k.id)
+	defer func() {
+		if err != nil {
+			bklog.G(ctx).Errorf("failed to kill process in container id %s: %+v", k.id, err)
+		}
+	}()
+
+	// this timeout is generally a no-op, the Kill ctx should already have a
+	// shorter timeout but here as a fail-safe for future refactoring.
+	ctx, timeout := context.WithTimeout(ctx, 10*time.Second)
+	defer timeout()
+
+	if k.pidfile == "" {
+		// for `runc run` process we use `runc kill` to terminate the process
+		return k.runC.Kill(ctx, k.id, int(syscall.SIGKILL), nil)
+	}
+
+	// `runc exec` will write the pidfile a few milliseconds after we
+	// get the runc pid via the startedCh, so we might need to retry until
+	// it appears in the edge case where we want to kill a process
+	// immediately after it was created.
+	var pidData []byte
+	for {
+		pidData, err = os.ReadFile(k.pidfile)
+		if err != nil {
+			if os.IsNotExist(err) {
+				select {
+				case <-ctx.Done():
+					return errors.New("context cancelled before runc wrote pidfile")
+				case <-time.After(10 * time.Millisecond):
+					continue
+				}
+			}
+			return errors.Wrap(err, "failed to read pidfile from runc")
+		}
+		break
+	}
+	pid, err := strconv.Atoi(string(pidData))
+	if err != nil {
+		return errors.Wrap(err, "read invalid pid from pidfile")
+	}
+	process, err := os.FindProcess(pid)
+	if err != nil {
+		// error only possible on non-unix hosts
+		return errors.Wrapf(err, "failed to find process for pid %d from pidfile", pid)
+	}
+	defer process.Release()
+	return process.Signal(syscall.SIGKILL)
+}
+
+// procHandle is to track the process so we can send signals to it
+// and handle graceful shutdown.
+type procHandle struct {
+	// this is for the runc process (not the process in-container)
+	monitorProcess *os.Process
+	ready          chan struct{}
+	ended          chan struct{}
+	shutdown       func()
+	// this is only used when the request context is canceled and we need
+	// to kill the in-container process.
+	killer procKiller
+}
+
+// runcProcessHandle will create a procHandle that will be monitored, where
+// on ctx.Done the in-container process will receive a SIGKILL.  The returned
+// context should be used for the go-runc.(Run|Exec) invocations.  The returned
+// context will only be canceled in the case where the request context is
+// canceled and we are unable to send the SIGKILL to the in-container process.
+// The goal is to allow for runc to gracefully shutdown when the request context
+// is cancelled.
+func runcProcessHandle(ctx context.Context, killer procKiller) (*procHandle, context.Context) {
+	runcCtx, cancel := context.WithCancel(context.Background())
+	p := &procHandle{
+		ready:    make(chan struct{}),
+		ended:    make(chan struct{}),
+		shutdown: cancel,
+		killer:   killer,
+	}
+	// preserve the logger on the context used for the runc process handling
+	runcCtx = bklog.WithLogger(runcCtx, bklog.G(ctx))
+
+	go func() {
+		// Wait for pid
+		select {
+		case <-ctx.Done():
+			return // nothing to kill
+		case <-p.ready:
+		}
+
+		for {
+			select {
+			case <-ctx.Done():
+				killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second)
+				if err := p.killer.Kill(killCtx); err != nil {
+					select {
+					case <-killCtx.Done():
+						timeout()
+						cancel()
+						return
+					default:
+					}
+				}
+				timeout()
+				select {
+				case <-time.After(50 * time.Millisecond):
+				case <-p.ended:
+					return
+				}
+			case <-p.ended:
+				return
+			}
+		}
+	}()
+
+	return p, runcCtx
+}
+
+// Release will free resources with a procHandle.
+func (p *procHandle) Release() {
+	close(p.ended)
+	if p.monitorProcess != nil {
+		p.monitorProcess.Release()
+	}
+}
+
+// Shutdown should be called after the runc process has exited. This will allow
+// the signal handling and tty resize loops to exit, terminating the
+// goroutines.
+func (p *procHandle) Shutdown() {
+	if p.shutdown != nil {
+		p.shutdown()
 	}
 }
 
-// WaitForReady will wait until the Process has been populated or the
-// provided context was cancelled.  This should be called before using
-// the Process field.
-func (p *startingProcess) WaitForReady(ctx context.Context) error {
+// WaitForReady will wait until we have received the runc pid via the go-runc
+// Started channel, or until the request context is canceled.  This should
+// return without errors before attempting to send signals to the runc process.
+func (p *procHandle) WaitForReady(ctx context.Context) error {
 	select {
 	case <-ctx.Done():
 		return ctx.Err()
@@ -487,35 +669,37 @@ func (p *startingProcess) WaitForReady(ctx context.Context) error {
 	}
 }
 
-// WaitForStart will record the pid reported by Runc via the channel.
-// We wait for up to 10s for the runc process to start.  If the started
+// WaitForStart will record the runc pid reported by go-runc via the channel.
+// We wait for up to 10s for the runc pid to be reported.  If the started
 // callback is non-nil it will be called after receiving the pid.
-func (p *startingProcess) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error {
+func (p *procHandle) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error {
 	startedCtx, timeout := context.WithTimeout(ctx, 10*time.Second)
 	defer timeout()
-	var err error
 	select {
 	case <-startedCtx.Done():
-		return errors.New("runc started message never received")
-	case pid, ok := <-startedCh:
+		return errors.New("go-runc started message never received")
+	case runcPid, ok := <-startedCh:
 		if !ok {
-			return errors.New("runc process failed to send pid")
+			return errors.New("go-runc failed to send pid")
 		}
 		if started != nil {
 			started()
 		}
-		p.Process, err = os.FindProcess(pid)
+		var err error
+		p.monitorProcess, err = os.FindProcess(runcPid)
 		if err != nil {
-			return errors.Wrapf(err, "unable to find runc process for pid %d", pid)
+			// error only possible on non-unix hosts
+			return errors.Wrapf(err, "failed to find runc process %d", runcPid)
 		}
 		close(p.ready)
 	}
 	return nil
 }
 
-// handleSignals will wait until the runcProcess is ready then will
-// send each signal received on the channel to the process.
-func handleSignals(ctx context.Context, runcProcess *startingProcess, signals <-chan syscall.Signal) error {
+// handleSignals will wait until the procHandle is ready then will
+// send each signal received on the channel to the runc process (not directly
+// to the in-container process)
+func handleSignals(ctx context.Context, runcProcess *procHandle, signals <-chan syscall.Signal) error {
 	if signals == nil {
 		return nil
 	}
@@ -528,8 +712,15 @@ func handleSignals(ctx context.Context, runcProcess *startingProcess, signals <-
 		case <-ctx.Done():
 			return nil
 		case sig := <-signals:
-			err := runcProcess.Process.Signal(sig)
-			if err != nil {
+			if sig == syscall.SIGKILL {
+				// never send SIGKILL directly to runc, it needs to go to the
+				// process in-container
+				if err := runcProcess.killer.Kill(ctx); err != nil {
+					return err
+				}
+				continue
+			}
+			if err := runcProcess.monitorProcess.Signal(sig); err != nil {
 				bklog.G(ctx).Errorf("failed to signal %s to process: %s", sig, err)
 				return err
 			}

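Run now returns a resourcestypes.Recorder alongside the error, and hands container and network teardown to rec.CloseAsync so that sampling can cover the full container lifetime. A hypothetical caller (a sketch; the Wait-then-Samples ordering is an assumption based on the Recorder interface in types.go above):

package example

import (
	"context"

	"github.com/moby/buildkit/executor"
	resourcestypes "github.com/moby/buildkit/executor/resources/types"
)

func runAndCollect(ctx context.Context, exec executor.Executor, id string, root executor.Mount, mounts []executor.Mount, p executor.ProcessInfo) (*resourcestypes.Samples, error) {
	rec, err := exec.Run(ctx, id, root, mounts, p, nil)
	if err != nil {
		return nil, err
	}
	if rec == nil {
		return nil, nil // no cgroup path was available, nothing was recorded
	}
	if err := rec.Wait(); err != nil { // wait for the async release to finish
		return nil, err
	}
	return rec.Samples()
}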
+ 34 - 18
vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go

@@ -8,6 +8,7 @@ import (
 
 	runc "github.com/containerd/go-runc"
 	"github.com/moby/buildkit/executor"
+	"github.com/moby/buildkit/util/bklog"
 	"github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
@@ -17,15 +18,21 @@ var unsupportedConsoleError = errors.New("tty for runc is only supported on linu
 
 func updateRuncFieldsForHostOS(runtime *runc.Runc) {}
 
-func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func()) error {
+func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), keep bool) error {
 	if process.Meta.Tty {
 		return unsupportedConsoleError
 	}
-	return w.commonCall(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error {
+	extraArgs := []string{}
+	if keep {
+		extraArgs = append(extraArgs, "--keep")
+	}
+	killer := newRunProcKiller(w.runc, id)
+	return w.commonCall(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error {
 		_, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{
-			NoPivot: w.noPivot,
-			Started: started,
-			IO:      io,
+			NoPivot:   w.noPivot,
+			Started:   started,
+			IO:        io,
+			ExtraArgs: extraArgs,
 		})
 		return err
 	})
@@ -35,38 +42,47 @@ func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess
 	if process.Meta.Tty {
 		return unsupportedConsoleError
 	}
-	return w.commonCall(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error {
+
+	killer, err := newExecProcKiller(w.runc, id)
+	if err != nil {
+		return errors.Wrap(err, "failed to initialize process killer")
+	}
+	defer killer.Cleanup()
+
+	return w.commonCall(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error {
 		return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{
 			Started: started,
 			IO:      io,
+			PidFile: pidfile,
 		})
 	})
 }
 
-type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error
+type runcCall func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error
 
 // commonCall is the common run/exec logic used for non-linux runtimes. A tty
 // is only supported for linux, so this really just handles signal propagation
 // to the started runc process.
-func (w *runcExecutor) commonCall(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), call runcCall) error {
-	runcProcess := &startingProcess{
-		ready: make(chan struct{}),
-	}
+func (w *runcExecutor) commonCall(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), killer procKiller, call runcCall) error {
+	runcProcess, ctx := runcProcessHandle(ctx, killer)
 	defer runcProcess.Release()
 
-	var eg errgroup.Group
-	egCtx, cancel := context.WithCancel(ctx)
-	defer eg.Wait()
-	defer cancel()
+	eg, ctx := errgroup.WithContext(ctx)
+	defer func() {
+		if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) {
+			bklog.G(ctx).Errorf("runc process monitoring error: %s", err)
+		}
+	}()
+	defer runcProcess.Shutdown()
 
 	startedCh := make(chan int, 1)
 	eg.Go(func() error {
-		return runcProcess.WaitForStart(egCtx, startedCh, started)
+		return runcProcess.WaitForStart(ctx, startedCh, started)
 	})
 
 	eg.Go(func() error {
-		return handleSignals(egCtx, runcProcess, process.Signal)
+		return handleSignals(ctx, runcProcess, process.Signal)
 	})
 
-	return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr})
+	return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, killer.pidfile)
 }

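The switch from a bare errgroup.Group plus manual cancel to errgroup.WithContext means the derived context is canceled as soon as the first goroutine in the group fails, which is what lets WaitForStart and handleSignals unwind cleanly. A standalone illustration of that semantics (not from this diff):

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	eg.Go(func() error {
		return errors.New("boom") // the first error cancels ctx for the whole group
	})
	eg.Go(func() error {
		<-ctx.Done() // observes the cancellation instead of leaking
		return ctx.Err()
	})

	fmt.Println(eg.Wait()) // prints "boom": Wait returns the first non-nil error
}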
+ 39 - 23
vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go

@@ -21,50 +21,64 @@ func updateRuncFieldsForHostOS(runtime *runc.Runc) {
 	runtime.PdeathSignal = syscall.SIGKILL // this can still leak the process
 }
 
-func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func()) error {
-	return w.callWithIO(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error {
+func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), keep bool) error {
+	killer := newRunProcKiller(w.runc, id)
+	return w.callWithIO(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error {
+		extraArgs := []string{}
+		if keep {
+			extraArgs = append(extraArgs, "--keep")
+		}
 		_, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{
-			NoPivot: w.noPivot,
-			Started: started,
-			IO:      io,
+			NoPivot:   w.noPivot,
+			Started:   started,
+			IO:        io,
+			ExtraArgs: extraArgs,
 		})
 		return err
 	})
 }
 
 func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo, started func()) error {
-	return w.callWithIO(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error {
+	killer, err := newExecProcKiller(w.runc, id)
+	if err != nil {
+		return errors.Wrap(err, "failed to initialize process killer")
+	}
+	defer killer.Cleanup()
+
+	return w.callWithIO(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error {
 		return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{
 			Started: started,
 			IO:      io,
+			PidFile: pidfile,
 		})
 	})
 }
 
-type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error
+type runcCall func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error
 
-func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), call runcCall) error {
-	runcProcess := &startingProcess{
-		ready: make(chan struct{}),
-	}
+func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), killer procKiller, call runcCall) error {
+	runcProcess, ctx := runcProcessHandle(ctx, killer)
 	defer runcProcess.Release()
 
-	var eg errgroup.Group
-	egCtx, cancel := context.WithCancel(ctx)
-	defer eg.Wait()
-	defer cancel()
+	eg, ctx := errgroup.WithContext(ctx)
+	defer func() {
+		if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) {
+			bklog.G(ctx).Errorf("runc process monitoring error: %s", err)
+		}
+	}()
+	defer runcProcess.Shutdown()
 
 	startedCh := make(chan int, 1)
 	eg.Go(func() error {
-		return runcProcess.WaitForStart(egCtx, startedCh, started)
+		return runcProcess.WaitForStart(ctx, startedCh, started)
 	})
 
 	eg.Go(func() error {
-		return handleSignals(egCtx, runcProcess, process.Signal)
+		return handleSignals(ctx, runcProcess, process.Signal)
 	})
 
 	if !process.Meta.Tty {
-		return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr})
+		return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, killer.pidfile)
 	}
 
 	ptm, ptsName, err := console.NewPty()
@@ -84,7 +98,7 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces
 		}
 		pts.Close()
 		ptm.Close()
-		cancel() // this will shutdown resize and signal loops
+		runcProcess.Shutdown()
 		err := eg.Wait()
 		if err != nil {
 			bklog.G(ctx).Warningf("error while shutting down tty io: %s", err)
@@ -119,13 +133,13 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces
 	}
 
 	eg.Go(func() error {
-		err := runcProcess.WaitForReady(egCtx)
+		err := runcProcess.WaitForReady(ctx)
 		if err != nil {
 			return err
 		}
 		for {
 			select {
-			case <-egCtx.Done():
+			case <-ctx.Done():
 				return nil
 			case resize := <-process.Resize:
 				err = ptm.Resize(console.WinSize{
@@ -135,7 +149,9 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces
 				if err != nil {
 					bklog.G(ctx).Errorf("failed to resize ptm: %s", err)
 				}
-				err = runcProcess.Process.Signal(signal.SIGWINCH)
+				// SIGWINCH must be sent to the runc monitor process, as
+				// terminal resizing is done in runc.
+				err = runcProcess.monitorProcess.Signal(signal.SIGWINCH)
 				if err != nil {
 					bklog.G(ctx).Errorf("failed to send SIGWINCH to process: %s", err)
 				}
@@ -154,5 +170,5 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces
 		runcIO.stderr = pts
 	}
 
-	return call(ctx, startedCh, runcIO)
+	return call(ctx, startedCh, runcIO, killer.pidfile)
 }

+ 7 - 6
vendor/github.com/moby/buildkit/executor/stubs.go

@@ -1,17 +1,18 @@
 package executor
 
 import (
+	"context"
 	"errors"
 	"os"
 	"path/filepath"
 	"syscall"
 
 	"github.com/containerd/continuity/fs"
+	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/system"
-	"github.com/sirupsen/logrus"
 )
 
-func MountStubsCleaner(dir string, mounts []Mount, recursive bool) func() {
+func MountStubsCleaner(ctx context.Context, dir string, mounts []Mount, recursive bool) func() {
 	names := []string{"/etc/resolv.conf", "/etc/hosts"}
 
 	for _, m := range mounts {
@@ -72,23 +73,23 @@ func MountStubsCleaner(dir string, mounts []Mount, recursive bool) func() {
 			dir := filepath.Dir(p)
 			dirSt, err := os.Stat(dir)
 			if err != nil {
-				logrus.WithError(err).Warnf("Failed to stat %q (parent of mount stub %q)", dir, p)
+				bklog.G(ctx).WithError(err).Warnf("Failed to stat %q (parent of mount stub %q)", dir, p)
 				continue
 			}
 			mtime := dirSt.ModTime()
 			atime, err := system.Atime(dirSt)
 			if err != nil {
-				logrus.WithError(err).Warnf("Failed to stat atime of %q (parent of mount stub %q)", dir, p)
+				bklog.G(ctx).WithError(err).Warnf("Failed to stat atime of %q (parent of mount stub %q)", dir, p)
 				atime = mtime
 			}
 
 			if err := os.Remove(p); err != nil {
-				logrus.WithError(err).Warnf("Failed to remove mount stub %q", p)
+				bklog.G(ctx).WithError(err).Warnf("Failed to remove mount stub %q", p)
 			}
 
 			// Restore the timestamps of the dir
 			if err := os.Chtimes(dir, atime, mtime); err != nil {
-				logrus.WithError(err).Warnf("Failed to restore time time mount stub timestamp (os.Chtimes(%q, %v, %v))", dir, atime, mtime)
+				bklog.G(ctx).WithError(err).Warnf("Failed to restore time time mount stub timestamp (os.Chtimes(%q, %v, %v))", dir, atime, mtime)
 			}
 		}
 	}

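The stat/remove/Chtimes sequence above keeps stub removal from dirtying the parent directory's timestamps, which matters for reproducible layer digests. The same idea with only the standard library (system.Atime is buildkit-specific, so this sketch reuses mtime for atime, as the code above also does on error):

package example

import (
	"os"
	"path/filepath"
)

// removePreservingDirTimes removes a file and restores the parent
// directory's timestamps, since removing an entry updates the dir's mtime.
func removePreservingDirTimes(p string) error {
	dir := filepath.Dir(p)
	st, err := os.Stat(dir)
	if err != nil {
		return err
	}
	mtime := st.ModTime()
	atime := mtime // stand-in for the real atime

	if err := os.Remove(p); err != nil {
		return err
	}
	return os.Chtimes(dir, atime, mtime)
}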
+ 5 - 5
vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go

@@ -20,11 +20,11 @@ import (
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	spdx_json "github.com/spdx/tools-golang/json"
-	"github.com/spdx/tools-golang/spdx/common"
-	spdx "github.com/spdx/tools-golang/spdx/v2_3"
+	"github.com/spdx/tools-golang/spdx"
+	"github.com/spdx/tools-golang/spdx/v2/common"
 )
 
-var intotoPlatform ocispecs.Platform = ocispecs.Platform{
+var intotoPlatform = ocispecs.Platform{
 	Architecture: "unknown",
 	OS:           "unknown",
 }
@@ -122,7 +122,7 @@ func supplementSBOM(ctx context.Context, s session.Group, target cache.Immutable
 }
 
 func decodeSPDX(dt []byte) (s *spdx.Document, err error) {
-	doc, err := spdx_json.Load2_3(bytes.NewReader(dt))
+	doc, err := spdx_json.Read(bytes.NewReader(dt))
 	if err != nil {
 		return nil, errors.Wrap(err, "unable to decode spdx")
 	}
@@ -134,7 +134,7 @@ func decodeSPDX(dt []byte) (s *spdx.Document, err error) {
 
 func encodeSPDX(s *spdx.Document) (dt []byte, err error) {
 	w := bytes.NewBuffer(nil)
-	err = spdx_json.Save2_3(s, w)
+	err = spdx_json.Write(s, w)
 	if err != nil {
 		return nil, errors.Wrap(err, "unable to encode spdx")
 	}

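tools-golang v0.5 replaces the version-pinned Load2_3/Save2_3 entry points with version-neutral Read/Write over the spdx/v2 document types, which is the only change needed here. A minimal round-trip sketch using the same two calls as the diff:

package example

import (
	"bytes"

	spdx_json "github.com/spdx/tools-golang/json"
)

// roundTripSPDX decodes and re-encodes an SPDX JSON document, mirroring
// decodeSPDX/encodeSPDX above.
func roundTripSPDX(dt []byte) ([]byte, error) {
	doc, err := spdx_json.Read(bytes.NewReader(dt))
	if err != nil {
		return nil, err
	}
	var buf bytes.Buffer
	if err := spdx_json.Write(doc, &buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}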
+ 83 - 75
vendor/github.com/moby/buildkit/exporter/containerimage/export.go

@@ -5,19 +5,18 @@ import (
 	"encoding/base64"
 	"encoding/json"
 	"fmt"
+	"sort"
 	"strconv"
 	"strings"
-	"time"
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/leases"
+	"github.com/containerd/containerd/pkg/epoch"
 	"github.com/containerd/containerd/platforms"
-	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
 	"github.com/containerd/containerd/rootfs"
-	intoto "github.com/in-toto/in-toto-golang/in_toto"
 	"github.com/moby/buildkit/cache"
 	cacheconfig "github.com/moby/buildkit/cache/config"
 	"github.com/moby/buildkit/exporter"
@@ -33,17 +32,10 @@ import (
 	"github.com/opencontainers/image-spec/identity"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
 )
 
 const (
-	keyPush           = "push"
-	keyPushByDigest   = "push-by-digest"
-	keyInsecure       = "registry.insecure"
-	keyUnpack         = "unpack"
-	keyDanglingPrefix = "dangling-name-prefix"
-	keyNameCanonical  = "name-canonical"
-	keyStore          = "store"
-
 	// keyUnsafeInternalStoreAllowIncomplete should only be used for tests. This option allows exporting image to the image store
 	// as well as lacking some blobs in the content store. Some integration tests for lazyref behaviour depends on this option.
 	// Ignored when store=false.
@@ -78,20 +70,19 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 			RefCfg: cacheconfig.RefConfig{
 				Compression: compression.New(compression.Default),
 			},
-			BuildInfo:               true,
 			ForceInlineAttestations: true,
 		},
 		store: true,
 	}
 
-	opt, err := i.opts.Load(opt)
+	opt, err := i.opts.Load(ctx, opt)
 	if err != nil {
 		return nil, err
 	}
 
 	for k, v := range opt {
-		switch k {
-		case keyPush:
+		switch exptypes.ImageExporterOptKey(k) {
+		case exptypes.OptKeyPush:
 			if v == "" {
 				i.push = true
 				continue
@@ -101,7 +92,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 				return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
 			}
 			i.push = b
-		case keyPushByDigest:
+		case exptypes.OptKeyPushByDigest:
 			if v == "" {
 				i.pushByDigest = true
 				continue
@@ -111,7 +102,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 				return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
 			}
 			i.pushByDigest = b
-		case keyInsecure:
+		case exptypes.OptKeyInsecure:
 			if v == "" {
 				i.insecure = true
 				continue
@@ -121,7 +112,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 				return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
 			}
 			i.insecure = b
-		case keyUnpack:
+		case exptypes.OptKeyUnpack:
 			if v == "" {
 				i.unpack = true
 				continue
@@ -131,7 +122,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 				return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
 			}
 			i.unpack = b
-		case keyStore:
+		case exptypes.OptKeyStore:
 			if v == "" {
 				i.store = true
 				continue
@@ -151,9 +142,9 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 				return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
 			}
 			i.storeAllowIncomplete = b
-		case keyDanglingPrefix:
+		case exptypes.OptKeyDanglingPrefix:
 			i.danglingPrefix = v
-		case keyNameCanonical:
+		case exptypes.OptKeyNameCanonical:
 			if v == "" {
 				i.nameCanonical = true
 				continue
@@ -247,60 +238,73 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
 		for _, targetName := range targetNames {
 			if e.opt.Images != nil && e.store {
 				tagDone := progress.OneOff(ctx, "naming to "+targetName)
+
+				// imageClientCtx is used for propagating the epoch to e.opt.Images.Update() and e.opt.Images.Create().
+				//
+				// Ideally, we should be able to propagate the epoch via images.Image.CreatedAt.
+				// However, due to a bug of containerd, we are temporarily stuck with this workaround.
+				// https://github.com/containerd/containerd/issues/8322
+				imageClientCtx := ctx
+				if e.opts.Epoch != nil {
+					imageClientCtx = epoch.WithSourceDateEpoch(imageClientCtx, e.opts.Epoch)
+				}
 				img := images.Image{
-					Target:    *desc,
-					CreatedAt: time.Now(),
+					Target: *desc,
+					// CreatedAt in images.Images is ignored due to a bug of containerd.
+					// See the comment lines for imageClientCtx.
 				}
+
 				sfx := []string{""}
 				if nameCanonical {
 					sfx = append(sfx, "@"+desc.Digest.String())
 				}
 				for _, sfx := range sfx {
 					img.Name = targetName + sfx
-					if _, err := e.opt.Images.Update(ctx, img); err != nil {
+					if _, err := e.opt.Images.Update(imageClientCtx, img); err != nil {
 						if !errors.Is(err, errdefs.ErrNotFound) {
 							return nil, nil, tagDone(err)
 						}
 
-						if _, err := e.opt.Images.Create(ctx, img); err != nil {
+						if _, err := e.opt.Images.Create(imageClientCtx, img); err != nil {
 							return nil, nil, tagDone(err)
 						}
 					}
 				}
 				tagDone(nil)
 
-				if src.Ref != nil && e.unpack {
+				if e.unpack {
 					if err := e.unpackImage(ctx, img, src, session.NewGroup(sessionID)); err != nil {
 						return nil, nil, err
 					}
 				}
 
 				if !e.storeAllowIncomplete {
+					var refs []cache.ImmutableRef
 					if src.Ref != nil {
-						remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
-						if err != nil {
-							return nil, nil, err
-						}
-						remote := remotes[0]
-						if unlazier, ok := remote.Provider.(cache.Unlazier); ok {
-							if err := unlazier.Unlazy(ctx); err != nil {
-								return nil, nil, err
-							}
-						}
+						refs = append(refs, src.Ref)
 					}
-					if len(src.Refs) > 0 {
-						for _, r := range src.Refs {
-							remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+					for _, ref := range src.Refs {
+						refs = append(refs, ref)
+					}
+					eg, ctx := errgroup.WithContext(ctx)
+					for _, ref := range refs {
+						ref := ref
+						eg.Go(func() error {
+							remotes, err := ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
 							if err != nil {
-								return nil, nil, err
+								return err
 							}
 							remote := remotes[0]
 							if unlazier, ok := remote.Provider.(cache.Unlazier); ok {
 								if err := unlazier.Unlazy(ctx); err != nil {
-									return nil, nil, err
+									return err
 								}
 							}
-						}
+							return nil
+						})
+					}
+					if err := eg.Wait(); err != nil {
+						return nil, nil, err
 					}
 				}
 			}
@@ -330,10 +334,18 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source
 }
 
 func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Source, sessionID string, targetName string, dgst digest.Digest) error {
+	var refs []cache.ImmutableRef
+	if src.Ref != nil {
+		refs = append(refs, src.Ref)
+	}
+	for _, ref := range src.Refs {
+		refs = append(refs, ref)
+	}
+
 	annotations := map[digest.Digest]map[string]string{}
 	annotations := map[digest.Digest]map[string]string{}
 	mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore())
-	if src.Ref != nil {
-		remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+	for _, ref := range refs {
+		remotes, err := ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
 		if err != nil {
 			return err
 		}
 			addAnnotations(annotations, desc)
 			addAnnotations(annotations, desc)
 		}
 	}
-		for _, r := range src.Refs {
-			remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
-			if err != nil {
-				return err
-			}
-			remote := remotes[0]
-			for _, desc := range remote.Descriptors {
-				mprovider.Add(desc.Digest, remote.Provider)
-				addAnnotations(annotations, desc)
-			}
-		}
-	}
-
-	ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto")
 	return push.Push(ctx, e.opt.SessionManager, sessionID, mprovider, e.opt.ImageWriter.ContentStore(), dgst, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations)
 }
 
 func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Image, src *exporter.Source, s session.Group) (err0 error) {
+	matcher := platforms.Only(platforms.Normalize(platforms.DefaultSpec()))
+
+	ps, err := exptypes.ParsePlatforms(src.Metadata)
+	if err != nil {
+		return err
+	}
+	matching := []exptypes.Platform{}
+	for _, p2 := range ps.Platforms {
+		if matcher.Match(p2.Platform) {
+			matching = append(matching, p2)
+		}
+	}
+	if len(matching) == 0 {
+		// current platform was not found, so skip unpacking
+		return nil
+	}
+	sort.SliceStable(matching, func(i, j int) bool {
+		return matcher.Less(matching[i].Platform, matching[j].Platform)
+	})
+
+	ref, _ := src.FindRef(matching[0].ID)
+	if ref == nil {
+		// ref has no layers, so nothing to unpack
+		return nil
+	}
+
 	unpackDone := progress.OneOff(ctx, "unpacking to "+img.Name)
 	defer func() {
 		unpackDone(err0)
@@ -379,16 +402,7 @@ func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Imag
 		return err
 	}
 
-	topLayerRef := src.Ref
-	if len(src.Refs) > 0 {
-		if r, ok := src.Refs[defaultPlatform()]; ok {
-			topLayerRef = r
-		} else {
-			return errors.Errorf("no reference for default platform %s", defaultPlatform())
-		}
-	}
-
-	remotes, err := topLayerRef.GetRemotes(ctx, true, e.opts.RefCfg, false, s)
+	remotes, err := ref.GetRemotes(ctx, true, e.opts.RefCfg, false, s)
 	if err != nil {
 		return err
 	}
@@ -461,12 +475,6 @@ func addAnnotations(m map[digest.Digest]map[string]string, desc ocispecs.Descrip
 	}
 }
 
-func defaultPlatform() string {
-	// Use normalized platform string to avoid the mismatch with platform options which
-	// are normalized using platforms.Normalize()
-	return platforms.Format(platforms.Normalize(platforms.DefaultSpec()))
-}
-
 func NewDescriptorReference(desc ocispecs.Descriptor, release func(context.Context) error) exporter.DescriptorReference {
 	return &descriptorReference{
 		desc:    desc,

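The unpackImage changes above replace the old defaultPlatform() string comparison with containerd's platform matcher: collect the platforms that match the host, sort them by preference, and unpack the best match (or skip if none match). A minimal sketch of that selection, using hypothetical candidate platforms:

package main

import (
	"fmt"
	"sort"

	"github.com/containerd/containerd/platforms"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Match against the normalized host platform, as unpackImage now does.
	matcher := platforms.Only(platforms.Normalize(platforms.DefaultSpec()))

	candidates := []ocispecs.Platform{
		{OS: "linux", Architecture: "amd64"},
		{OS: "linux", Architecture: "arm64", Variant: "v8"},
	}

	matching := []ocispecs.Platform{}
	for _, p := range candidates {
		if matcher.Match(p) {
			matching = append(matching, p)
		}
	}
	if len(matching) == 0 {
		fmt.Println("current platform not found, skipping unpack")
		return
	}
	// Prefer the closest match, mirroring the sort in unpackImage.
	sort.SliceStable(matching, func(i, j int) bool {
		return matcher.Less(matching[i], matching[j])
	})
	fmt.Println("would unpack:", platforms.Format(matching[0]))
}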
+ 75 - 0
vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go

@@ -0,0 +1,75 @@
+package exptypes
+
+import commonexptypes "github.com/moby/buildkit/exporter/exptypes"
+
+type ImageExporterOptKey string
+
+// Options keys supported by the image exporter output.
+var (
+	// Name of the image.
+	// Value: string
+	OptKeyName ImageExporterOptKey = "name"
+
+	// Push after creating image.
+	// Value: bool <true|false>
+	OptKeyPush ImageExporterOptKey = "push"
+
+	// Push unnamed image.
+	// Value: bool <true|false>
+	OptKeyPushByDigest ImageExporterOptKey = "push-by-digest"
+
+	// Allow pushing to insecure HTTP registry.
+	// Value: bool <true|false>
+	OptKeyInsecure ImageExporterOptKey = "registry.insecure"
+
+	// Unpack image after it's created (containerd).
+	// Value: bool <true|false>
+	OptKeyUnpack ImageExporterOptKey = "unpack"
+
+	// Fallback image name prefix if image name isn't provided.
+	// If used, image will be named as <value>@<digest>
+	// Value: string
+	OptKeyDanglingPrefix ImageExporterOptKey = "dangling-name-prefix"
+
+	// Creates additional image name with format <name>@<digest>
+	// Value: bool <true|false>
+	OptKeyNameCanonical ImageExporterOptKey = "name-canonical"
+
+	// Store the resulting image along with all of the content it references.
+	// Ignored if the worker doesn't have image store (e.g. OCI worker).
+	// Value: bool <true|false>
+	OptKeyStore ImageExporterOptKey = "store"
+
+	// Use OCI mediatypes instead of Docker in JSON configs.
+	// Value: bool <true|false>
+	OptKeyOCITypes ImageExporterOptKey = "oci-mediatypes"
+
+	// Force attestation to be attached.
+	// Value: bool <true|false>
+	OptKeyForceInlineAttestations ImageExporterOptKey = "attestation-inline"
+
+	// Mark layers as non-distributable if they are found to use a
+	// non-distributable media type. When this option is not set, the exporter
+	// will change the media type of the layer to a distributable one.
+	// Value: bool <true|false>
+	OptKeyPreferNondistLayers ImageExporterOptKey = "prefer-nondist-layers"
+
+	// Clamp produced timestamps. For more information see the
+	// SOURCE_DATE_EPOCH specification.
+	// Value: int (number of seconds since Unix epoch)
+	OptKeySourceDateEpoch ImageExporterOptKey = ImageExporterOptKey(commonexptypes.OptKeySourceDateEpoch)
+
+	// Compression type for newly created and cached layers.
+	// estargz should be used with OptKeyOCITypes set to true.
+	// Value: string <uncompressed|gzip|estargz|zstd>
+	OptKeyLayerCompression ImageExporterOptKey = "compression"
+
+	// Force compression on all (including existing) layers.
+	// Value: bool <true|false>
+	OptKeyForceCompression ImageExporterOptKey = "force-compression"
+
+	// Compression level
+	// Value: int (0-9) for gzip and estargz
+	// Value: int (0-22) for zstd
+	OptKeyCompressionLevel ImageExporterOptKey = "compression-level"
+)

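The new keys.go above turns the image exporter's option names into typed constants. A short sketch of assembling exporter attributes with them; the image name and values here are made up:

package main

import (
	"fmt"

	"github.com/moby/buildkit/exporter/containerimage/exptypes"
)

func main() {
	attrs := map[string]string{
		string(exptypes.OptKeyName):             "registry.example.com/app:latest", // hypothetical
		string(exptypes.OptKeyPush):             "true",
		string(exptypes.OptKeyOCITypes):         "true",
		string(exptypes.OptKeyLayerCompression): "zstd",
		string(exptypes.OptKeyCompressionLevel): "3",
	}
	for k, v := range attrs {
		fmt.Printf("%s=%s\n", k, v)
	}
}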
+ 0 - 3
vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go

@@ -11,9 +11,7 @@ const (
 	ExporterImageConfigDigestKey = "containerimage.config.digest"
 	ExporterImageDescriptorKey   = "containerimage.descriptor"
 	ExporterInlineCache          = "containerimage.inlinecache"
-	ExporterBuildInfo            = "containerimage.buildinfo" // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md
 	ExporterPlatformsKey         = "refs.platforms"
-	ExporterEpochKey             = "source.date.epoch"
 )
 
 // KnownRefMetadataKeys are the subset of exporter keys that can be suffixed by
@@ -21,7 +19,6 @@ const (
 var KnownRefMetadataKeys = []string{
 	ExporterImageConfigKey,
 	ExporterInlineCache,
-	ExporterBuildInfo,
 }
 
 type Platforms struct {

+ 4 - 4
vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go

@@ -19,9 +19,10 @@ type HealthConfig struct {
 	Test []string `json:",omitempty"`
 
 	// Zero means to inherit. Durations are expressed as integer nanoseconds.
-	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
-	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
-	StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
+	Interval      time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+	Timeout       time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+	StartPeriod   time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
+	StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period.
 
 	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
 	// Zero means inherit.
@@ -33,7 +34,6 @@ type ImageConfig struct {
 	ocispecs.ImageConfig
 
 	Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
-	ArgsEscaped bool          `json:",omitempty"` // True if command is already escaped (Windows specific)
 
 	//	NetworkDisabled bool                `json:",omitempty"` // Is network disabled
 	//	MacAddress      string              `json:",omitempty"` // Mac Address of the container

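The HealthConfig hunk above adds StartInterval, the probe interval used during the start period. A self-contained sketch using a local copy of the struct (not the vendored type) to show how the field serializes:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Local illustrative copy of the vendored HealthConfig shape.
type HealthConfig struct {
	Test          []string      `json:",omitempty"`
	Interval      time.Duration `json:",omitempty"`
	Timeout       time.Duration `json:",omitempty"`
	StartPeriod   time.Duration `json:",omitempty"`
	StartInterval time.Duration `json:",omitempty"` // new field
	Retries       int           `json:",omitempty"`
}

func main() {
	hc := HealthConfig{
		Test:          []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval:      30 * time.Second,
		StartPeriod:   10 * time.Second,
		StartInterval: 2 * time.Second, // probe faster while the container starts
		Retries:       3,
	}
	out, _ := json.Marshal(hc)
	fmt.Println(string(out))
}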
+ 19 - 48
vendor/github.com/moby/buildkit/exporter/containerimage/opts.go

@@ -1,30 +1,16 @@
 package containerimage
 
 import (
+	"context"
 	"strconv"
 	"strconv"
 	"time"
 	"time"
 
 
 	cacheconfig "github.com/moby/buildkit/cache/config"
 	cacheconfig "github.com/moby/buildkit/cache/config"
+	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/exporter/util/epoch"
 	"github.com/moby/buildkit/exporter/util/epoch"
+	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/compression"
 	"github.com/moby/buildkit/util/compression"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-const (
-	keyImageName               = "name"
-	keyLayerCompression        = "compression"
-	keyCompressionLevel        = "compression-level"
-	keyForceCompression        = "force-compression"
-	keyOCITypes                = "oci-mediatypes"
-	keyBuildInfo               = "buildinfo"
-	keyBuildInfoAttrs          = "buildinfo-attrs"
-	keyForceInlineAttestations = "attestation-inline"
-
-	// preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was
-	// already found to use a non-distributable media type.
-	// When this option is not set, the exporter will change the media type of the layer to a distributable one.
-	keyPreferNondistLayers = "prefer-nondist-layers"
 )
 
 type ImageCommitOpts struct {
@@ -35,12 +21,9 @@ type ImageCommitOpts struct {
 	Epoch       *time.Time
 
 	ForceInlineAttestations bool // force inline attestations to be attached
-
-	BuildInfo      bool // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md
-	BuildInfoAttrs bool // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md
 }
 
-func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) {
+func (c *ImageCommitOpts) Load(ctx context.Context, opt map[string]string) (map[string]string, error) {
 	rest := make(map[string]string)
 
 	as, optb, err := ParseAnnotations(toBytesMap(opt))
@@ -54,32 +37,20 @@ func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error)
 		return nil, err
 	}
 
+	if c.RefCfg.Compression, err = compression.ParseAttributes(opt); err != nil {
+		return nil, err
+	}
+
 	for k, v := range opt {
 		var err error
-		switch k {
-		case keyImageName:
+		switch exptypes.ImageExporterOptKey(k) {
+		case exptypes.OptKeyName:
 			c.ImageName = v
-		case keyLayerCompression:
-			c.RefCfg.Compression.Type, err = compression.Parse(v)
-		case keyCompressionLevel:
-			ii, err2 := strconv.ParseInt(v, 10, 64)
-			if err != nil {
-				err = errors.Wrapf(err2, "non-int value %s specified for %s", v, k)
-				break
-			}
-			v := int(ii)
-			c.RefCfg.Compression.Level = &v
-		case keyForceCompression:
-			err = parseBoolWithDefault(&c.RefCfg.Compression.Force, k, v, true)
-		case keyOCITypes:
+		case exptypes.OptKeyOCITypes:
 			err = parseBoolWithDefault(&c.OCITypes, k, v, true)
-		case keyBuildInfo:
-			err = parseBoolWithDefault(&c.BuildInfo, k, v, true)
-		case keyBuildInfoAttrs:
-			err = parseBoolWithDefault(&c.BuildInfoAttrs, k, v, false)
-		case keyForceInlineAttestations:
+		case exptypes.OptKeyForceInlineAttestations:
 			err = parseBool(&c.ForceInlineAttestations, k, v)
-		case keyPreferNondistLayers:
+		case exptypes.OptKeyPreferNondistLayers:
 			err = parseBool(&c.RefCfg.PreferNonDistributable, k, v)
 		default:
 			rest[k] = v
@@ -91,11 +62,11 @@ func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error)
 	}
 
 	if c.RefCfg.Compression.Type.OnlySupportOCITypes() {
-		c.EnableOCITypes(c.RefCfg.Compression.Type.String())
+		c.EnableOCITypes(ctx, c.RefCfg.Compression.Type.String())
 	}
 
 	if c.RefCfg.Compression.Type.NeedsForceCompression() {
-		c.EnableForceCompression(c.RefCfg.Compression.Type.String())
+		c.EnableForceCompression(ctx, c.RefCfg.Compression.Type.String())
 	}
 
 	c.Annotations = c.Annotations.Merge(as)
@@ -103,25 +74,25 @@ func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error)
 	return rest, nil
 }
 
-func (c *ImageCommitOpts) EnableOCITypes(reason string) {
+func (c *ImageCommitOpts) EnableOCITypes(ctx context.Context, reason string) {
 	if !c.OCITypes {
 		message := "forcibly turning on oci-mediatype mode"
 		if reason != "" {
 			message += " for " + reason
 		}
-		logrus.Warn(message)
+		bklog.G(ctx).Warn(message)
 
 		c.OCITypes = true
 	}
 }
 
-func (c *ImageCommitOpts) EnableForceCompression(reason string) {
+func (c *ImageCommitOpts) EnableForceCompression(ctx context.Context, reason string) {
 	if !c.RefCfg.Compression.Force {
 		message := "forcibly turning on force-compression mode"
 		if reason != "" {
 			message += " for " + reason
 		}
-		logrus.Warn(message)
+		bklog.G(ctx).Warn(message)
 
 		c.RefCfg.Compression.Force = true
 	}

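Load now takes a context so the forced-mode warnings flow through bklog's context logger, and compression attributes are parsed up front via compression.ParseAttributes. A hedged sketch of calling it, assuming the vendored package is importable from this tree:

package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/exporter/containerimage"
)

func main() {
	// estargz only supports OCI media types, so Load is expected to flip
	// oci-mediatypes on and warn via bklog.G(ctx).
	var c containerimage.ImageCommitOpts
	rest, err := c.Load(context.TODO(), map[string]string{"compression": "estargz"})
	if err != nil {
		panic(err)
	}
	fmt.Println("oci-mediatypes:", c.OCITypes, "unhandled attrs:", rest)
}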
+ 36 - 88
vendor/github.com/moby/buildkit/exporter/containerimage/writer.go

@@ -26,8 +26,6 @@ import (
 	"github.com/moby/buildkit/solver/result"
 	"github.com/moby/buildkit/solver/result"
 	attestationTypes "github.com/moby/buildkit/util/attestation"
 	attestationTypes "github.com/moby/buildkit/util/attestation"
 	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/bklog"
-	"github.com/moby/buildkit/util/buildinfo"
-	binfotypes "github.com/moby/buildkit/util/buildinfo/types"
 	"github.com/moby/buildkit/util/compression"
 	"github.com/moby/buildkit/util/compression"
 	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/util/purl"
 	"github.com/moby/buildkit/util/purl"
@@ -36,6 +34,7 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/image-spec/specs-go"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/package-url/packageurl-go"
 	"github.com/pkg/errors"
 	"github.com/pkg/errors"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"
 	"go.opentelemetry.io/otel/trace"
@@ -102,7 +101,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 			}
 		}
 		if len(a.Index)+len(a.IndexDescriptor)+len(a.ManifestDescriptor) > 0 {
-			opts.EnableOCITypes("annotations")
+			opts.EnableOCITypes(ctx, "annotations")
 		}
 	}
 
@@ -127,15 +126,6 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 			return nil, err
 		}
 
-		var dtbi []byte
-		if opts.BuildInfo {
-			if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{
-				RemoveAttrs: !opts.BuildInfoAttrs,
-			}); err != nil {
-				return nil, err
-			}
-		}
-
 		annotations := opts.Annotations.Platform(nil)
 		if len(annotations.Index) > 0 || len(annotations.IndexDescriptor) > 0 {
 			return nil, errors.Errorf("index annotations not supported for single platform export")
@@ -143,7 +133,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 
 		config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p)
 		inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p)
-		mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, &remotes[0], annotations, inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID))
+		mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, &remotes[0], annotations, inlineCache, opts.Epoch, session.NewGroup(sessionID))
 		if err != nil {
 			return nil, err
 		}
@@ -159,7 +149,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 	}
 
 	if len(inp.Attestations) > 0 {
-		opts.EnableOCITypes("attestations")
+		opts.EnableOCITypes(ctx, "attestations")
 	}
 
 	refs := make([]cache.ImmutableRef, 0, len(inp.Refs))
@@ -178,19 +168,11 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 		return nil, err
 	}
 
-	idx := struct {
-		// MediaType is reserved in the OCI spec but
-		// excluded from go types.
-		MediaType string `json:"mediaType,omitempty"`
-
-		ocispecs.Index
-	}{
-		MediaType: ocispecs.MediaTypeImageIndex,
-		Index: ocispecs.Index{
-			Annotations: opts.Annotations.Platform(nil).Index,
-			Versioned: specs.Versioned{
-				SchemaVersion: 2,
-			},
+	idx := ocispecs.Index{
+		MediaType:   ocispecs.MediaTypeImageIndex,
+		Annotations: opts.Annotations.Platform(nil).Index,
+		Versioned: specs.Versioned{
+			SchemaVersion: 2,
 		},
 	}
 
@@ -210,15 +192,6 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 		config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p)
 		inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p)
 
-		var dtbi []byte
-		if opts.BuildInfo {
-			if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{
-				RemoveAttrs: !opts.BuildInfoAttrs,
-			}); err != nil {
-				return nil, err
-			}
-		}
-
 		remote := &remotes[remotesMap[p.ID]]
 		if remote == nil {
 			remote = &solver.Remote{
@@ -226,7 +199,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 			}
 		}
 
-		desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID))
+		desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCache, opts.Epoch, session.NewGroup(sessionID))
 		if err != nil {
 			return nil, err
 		}
@@ -263,7 +236,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session
 				if name == "" {
 					continue
 				}
-				pl, err := purl.RefToPURL(name, &p.Platform)
+				pl, err := purl.RefToPURL(packageurl.TypeDocker, name, &p.Platform)
 				if err != nil {
 					return nil, err
 				}
@@ -350,7 +323,7 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefC
 	return out, err
 }
 
-func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, buildInfo []byte, epoch *time.Time, sg session.Group) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) {
+func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, epoch *time.Time, sg session.Group) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) {
 	if len(config) == 0 {
 		var err error
 		config, err = defaultImageConfig()
@@ -369,7 +342,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima
 		return nil, nil, err
 	}
 
-	config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, buildInfo, epoch)
+	config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, epoch)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -386,24 +359,16 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima
 		configType = images.MediaTypeDockerSchema2Config
 	}
 
-	mfst := struct {
-		// MediaType is reserved in the OCI spec but
-		// excluded from go types.
-		MediaType string `json:"mediaType,omitempty"`
-
-		ocispecs.Manifest
-	}{
-		MediaType: manifestType,
-		Manifest: ocispecs.Manifest{
-			Annotations: annotations.Manifest,
-			Versioned: specs.Versioned{
-				SchemaVersion: 2,
-			},
-			Config: ocispecs.Descriptor{
-				Digest:    configDigest,
-				Size:      int64(len(config)),
-				MediaType: configType,
-			},
+	mfst := ocispecs.Manifest{
+		MediaType:   manifestType,
+		Annotations: annotations.Manifest,
+		Versioned: specs.Versioned{
+			SchemaVersion: 2,
+		},
+		Config: ocispecs.Descriptor{
+			Digest:    configDigest,
+			Size:      int64(len(config)),
+			MediaType: configType,
 		},
 	}
 
@@ -411,9 +376,10 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima
 		"containerd.io/gc.ref.content.0": configDigest.String(),
 		"containerd.io/gc.ref.content.0": configDigest.String(),
 	}
 	}
 
 
-	for _, desc := range remote.Descriptors {
+	for i, desc := range remote.Descriptors {
 		desc.Annotations = RemoveInternalLayerAnnotations(desc.Annotations, opts.OCITypes)
 		mfst.Layers = append(mfst.Layers, desc)
+		labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String()
 	}
 
 	mfstJSON, err := json.MarshalIndent(mfst, "", "  ")
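
The loop above now labels each layer blob for containerd's garbage collector, not just the config (slot 0). A standalone sketch of the label scheme with made-up digests:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	configDigest := digest.FromString("config")
	layers := []digest.Digest{digest.FromString("layer0"), digest.FromString("layer1")}

	// Slot 0 pins the config; each layer gets its own gc.ref.content.N label
	// so containerd keeps the blobs reachable.
	labels := map[string]string{
		"containerd.io/gc.ref.content.0": configDigest.String(),
	}
	for i, dgst := range layers {
		labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = dgst.String()
	}
	fmt.Println(labels)
}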
@@ -473,7 +439,7 @@ func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *Ima
 		}
 		digest := digest.FromBytes(data)
 		desc := ocispecs.Descriptor{
-			MediaType: attestationTypes.MediaTypeDockerSchema2AttestationType,
+			MediaType: intoto.PayloadType,
 			Digest:    digest,
 			Size:      int64(len(data)),
 			Annotations: map[string]string{
@@ -499,23 +465,15 @@ func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *Ima
 		MediaType: configType,
 	}
 
-	mfst := struct {
-		// MediaType is reserved in the OCI spec but
-		// excluded from go types.
-		MediaType string `json:"mediaType,omitempty"`
-
-		ocispecs.Manifest
-	}{
+	mfst := ocispecs.Manifest{
 		MediaType: manifestType,
-		Manifest: ocispecs.Manifest{
-			Versioned: specs.Versioned{
-				SchemaVersion: 2,
-			},
-			Config: ocispecs.Descriptor{
-				Digest:    configDigest,
-				Size:      int64(len(config)),
-				MediaType: configType,
-			},
+		Versioned: specs.Versioned{
+			SchemaVersion: 2,
+		},
+		Config: ocispecs.Descriptor{
+			Digest:    configDigest,
+			Size:      int64(len(config)),
+			MediaType: configType,
 		},
 	}
 
@@ -610,7 +568,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) {
 	return config.History, nil
 }
 
-func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, buildInfo []byte, epoch *time.Time) ([]byte, error) {
+func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, epoch *time.Time) ([]byte, error) {
 	m := map[string]json.RawMessage{}
 	if err := json.Unmarshal(dt, &m); err != nil {
 		return nil, errors.Wrap(err, "failed to parse image config for patch")
@@ -678,16 +636,6 @@ func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs
 		m["moby.buildkit.cache.v0"] = dt
 		m["moby.buildkit.cache.v0"] = dt
 	}
 	}
 
 
-	if buildInfo != nil {
-		dt, err := json.Marshal(buildInfo)
-		if err != nil {
-			return nil, err
-		}
-		m[binfotypes.ImageConfigField] = dt
-	} else {
-		delete(m, binfotypes.ImageConfigField)
-	}
-
 	dt, err = json.Marshal(m)
 	return dt, errors.Wrap(err, "failed to marshal config after patch")
 }
@@ -774,7 +722,7 @@ func normalizeLayersAndHistory(ctx context.Context, remote *solver.Remote, histo
 	}
 
 	// convert between oci and docker media types (or vice versa) if needed
-	remote.Descriptors = compression.ConvertAllLayerMediaTypes(oci, remote.Descriptors...)
+	remote.Descriptors = compression.ConvertAllLayerMediaTypes(ctx, oci, remote.Descriptors...)
 
 	return remote, history
 }

+ 15 - 0
vendor/github.com/moby/buildkit/exporter/exptypes/keys.go

@@ -0,0 +1,15 @@
+package exptypes
+
+const (
+	ExporterEpochKey = "source.date.epoch"
+)
+
+type ExporterOptKey string
+
+// Options keys supported by all exporters.
+var (
+	// Clamp produced timestamps. For more information see the
+	// SOURCE_DATE_EPOCH specification.
+	// Value: int (number of seconds since Unix epoch)
+	OptKeySourceDateEpoch ExporterOptKey = "source-date-epoch"
+)

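OptKeySourceDateEpoch is the shared key that any exporter can honor to clamp produced timestamps. For example, with a hypothetical epoch of 2023-01-01T00:00:00Z:

package main

import (
	"fmt"

	commonexptypes "github.com/moby/buildkit/exporter/exptypes"
)

func main() {
	attrs := map[string]string{
		string(commonexptypes.OptKeySourceDateEpoch): "1672531200", // 2023-01-01T00:00:00Z
	}
	fmt.Println(attrs)
}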
+ 41 - 29
vendor/github.com/moby/buildkit/exporter/local/export.go

@@ -4,6 +4,7 @@ import (
 	"context"
 	"context"
 	"os"
 	"os"
 	"strings"
 	"strings"
+	"sync"
 	"time"
 	"time"
 
 
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/cache"
@@ -20,10 +21,6 @@ import (
 	"golang.org/x/time/rate"
 	"golang.org/x/time/rate"
 )
 )
 
 
-const (
-	keyAttestationPrefix = "attestation-prefix"
-)
-
 type Opt struct {
 	SessionManager *session.Manager
 }
@@ -39,23 +36,12 @@ func New(opt Opt) (exporter.Exporter, error) {
 }
 
 func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
-	tm, _, err := epoch.ParseExporterAttrs(opt)
-	if err != nil {
-		return nil, err
-	}
-
 	i := &localExporterInstance{
 		localExporter: e,
-		opts: CreateFSOpts{
-			Epoch: tm,
-		},
 	}
-
-	for k, v := range opt {
-		switch k {
-		case keyAttestationPrefix:
-			i.opts.AttestationPrefix = v
-		}
+	_, err := i.opts.Load(opt)
+	if err != nil {
+		return nil, err
 	}
 
 	return i, nil
@@ -107,6 +93,9 @@ func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source
 
 	now := time.Now().Truncate(time.Second)
 
+	visitedPath := map[string]string{}
+	var visitedMu sync.Mutex
+
 	export := func(ctx context.Context, k string, ref cache.ImmutableRef, attestations []exporter.Attestation) func() error {
 		return func() error {
 			outputFS, cleanup, err := CreateFS(ctx, sessionID, k, ref, attestations, now, e.opts)
@@ -117,20 +106,43 @@ func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source
 				defer cleanup()
 			}
 
+			if !e.opts.PlatformSplit {
+				// check for duplicate paths
+				err = outputFS.Walk(ctx, func(p string, fi os.FileInfo, err error) error {
+					if fi.IsDir() {
+						return nil
+					}
+					if err != nil && !errors.Is(err, os.ErrNotExist) {
+						return err
+					}
+					visitedMu.Lock()
+					defer visitedMu.Unlock()
+					if vp, ok := visitedPath[p]; ok {
+						return errors.Errorf("cannot overwrite %s from %s with %s when split option is disabled", p, vp, k)
+					}
+					visitedPath[p] = k
+					return nil
+				})
+				if err != nil {
+					return err
+				}
+			}
+
 			lbl := "copying files"
 			lbl := "copying files"
 			if isMap {
 			if isMap {
 				lbl += " " + k
 				lbl += " " + k
-				st := fstypes.Stat{
-					Mode: uint32(os.ModeDir | 0755),
-					Path: strings.Replace(k, "/", "_", -1),
-				}
-				if e.opts.Epoch != nil {
-					st.ModTime = e.opts.Epoch.UnixNano()
-				}
-
-				outputFS, err = fsutil.SubDirFS([]fsutil.Dir{{FS: outputFS, Stat: st}})
-				if err != nil {
-					return err
+				if e.opts.PlatformSplit {
+					st := fstypes.Stat{
+						Mode: uint32(os.ModeDir | 0755),
+						Path: strings.Replace(k, "/", "_", -1),
+					}
+					if e.opts.Epoch != nil {
+						st.ModTime = e.opts.Epoch.UnixNano()
+					}
+					outputFS, err = fsutil.SubDirFS([]fsutil.Dir{{FS: outputFS, Stat: st}})
+					if err != nil {
+						return err
+					}
 				}
 			}
 

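The local-exporter hunk above adds a guard so that, with platform-split disabled, two platforms cannot write the same output path. A standalone sketch of that first-writer-wins check (types and names here are hypothetical):

package main

import (
	"fmt"
	"sync"
)

// pathGuard records which platform key wrote each path; any second writer
// is an error, matching the exporter's behavior when split is disabled.
type pathGuard struct {
	mu      sync.Mutex
	visited map[string]string
}

func (g *pathGuard) claim(path, key string) error {
	g.mu.Lock()
	defer g.mu.Unlock()
	if prev, ok := g.visited[path]; ok {
		return fmt.Errorf("cannot overwrite %s from %s with %s when split option is disabled", path, prev, key)
	}
	g.visited[path] = key
	return nil
}

func main() {
	g := &pathGuard{visited: map[string]string{}}
	fmt.Println(g.claim("bin/app", "linux/amd64")) // <nil>
	fmt.Println(g.claim("bin/app", "linux/arm64")) // error
}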
Some files were not shown because too many files changed in this diff