diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go index 890261ce39..55394948ea 100644 --- a/builder/builder-next/adapters/containerimage/pull.go +++ b/builder/builder-next/adapters/containerimage/pull.go @@ -63,7 +63,7 @@ type SourceOpt struct { // Source is the source implementation for accessing container images type Source struct { SourceOpt - g flightcontrol.Group + g flightcontrol.Group[interface{}] } // NewSource creates a new image source @@ -187,7 +187,7 @@ func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session type puller struct { is *Source resolveLocalOnce sync.Once - g flightcontrol.Group + g flightcontrol.Group[struct{}] src *source.ImageIdentifier desc ocispec.Descriptor ref string @@ -258,7 +258,7 @@ func (p *puller) resolveLocal() { } func (p *puller) resolve(ctx context.Context, g session.Group) error { - _, err := p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) { + _, err := p.g.Do(ctx, "", func(ctx context.Context) (_ struct{}, err error) { resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String()) defer func() { resolveProgressDone(err) @@ -266,13 +266,13 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error { ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String()) if err != nil { - return nil, err + return struct{}{}, err } if p.desc.Digest == "" && p.config == nil { origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String()) if err != nil { - return nil, err + return struct{}{}, err } p.desc = desc @@ -287,16 +287,16 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error { if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest { ref, err := distreference.WithDigest(ref, p.desc.Digest) if err != nil { - return nil, err + return struct{}{}, err } _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), 
llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g) if err != nil { - return nil, err + return struct{}{}, err } p.config = dt } - return nil, nil + return struct{}{}, nil }) return err } diff --git a/builder/builder-next/executor_linux.go b/builder/builder-next/executor_linux.go index b672d718bd..bee2371220 100644 --- a/builder/builder-next/executor_linux.go +++ b/builder/builder-next/executor_linux.go @@ -14,6 +14,7 @@ import ( "github.com/docker/docker/pkg/stringid" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" + "github.com/moby/buildkit/executor/resources" "github.com/moby/buildkit/executor/runcexecutor" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" @@ -49,6 +50,11 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi pidmap = nil } + rm, err := resources.NewMonitor() + if err != nil { + return nil, err + } + return runcexecutor.New(runcexecutor.Opt{ Root: filepath.Join(root, "executor"), CommandCandidates: []string{"runc"}, @@ -58,6 +64,7 @@ func newExecutor(root, cgroupParent string, net *libnetwork.Controller, dnsConfi IdentityMapping: pidmap, DNS: dnsConfig, ApparmorProfile: apparmorProfile, + ResourceMonitor: rm, }, networkProviders) } @@ -119,6 +126,11 @@ func (iface *lnInterface) init(c *libnetwork.Controller, n *libnetwork.Network) iface.ep = ep } +// TODO(neersighted): Unstub Sample(), and collect data from the libnetwork Endpoint. 
+func (iface *lnInterface) Sample() (*network.Sample, error) { + return &network.Sample{}, nil +} + func (iface *lnInterface) Set(s *specs.Spec) error { <-iface.ready if iface.err != nil { diff --git a/builder/builder-next/executor_nolinux.go b/builder/builder-next/executor_nolinux.go index 7275822da2..5c4ee6a72b 100644 --- a/builder/builder-next/executor_nolinux.go +++ b/builder/builder-next/executor_nolinux.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" + resourcetypes "github.com/moby/buildkit/executor/resources/types" ) func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool, _ idtools.IdentityMapping, _ string) (executor.Executor, error) { @@ -20,8 +21,8 @@ func newExecutor(_, _ string, _ *libnetwork.Controller, _ *oci.DNSConfig, _ bool type stubExecutor struct{} -func (w *stubExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { - return errors.New("buildkit executor not implemented for "+runtime.GOOS) +func (w *stubExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (resourcetypes.Recorder, error) { + return nil, errors.New("buildkit executor not implemented for "+runtime.GOOS) } func (w *stubExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) error { diff --git a/integration/build/build_traces_test.go b/integration/build/build_traces_test.go index 1f047a98d8..1cbc02f987 100644 --- a/integration/build/build_traces_test.go +++ b/integration/build/build_traces_test.go @@ -57,7 +57,7 @@ func TestBuildkitHistoryTracePropagation(t *testing.T) { }() eg.Go(func() error { - _, err := progressui.DisplaySolveStatus(ctxGo, "test", nil, &testWriter{t}, ch) + _, err := progressui.DisplaySolveStatus(ctxGo, nil, &testWriter{t}, 
ch, progressui.WithPhase("test")) return err }) diff --git a/vendor.mod b/vendor.mod index a178966d8e..dfab3a75a1 100644 --- a/vendor.mod +++ b/vendor.mod @@ -61,7 +61,7 @@ require ( github.com/miekg/dns v1.1.43 github.com/mistifyio/go-zfs/v3 v3.0.1 github.com/mitchellh/copystructure v1.2.0 - github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54 // v0.11 branch + github.com/moby/buildkit v0.12.2 github.com/moby/ipvs v1.1.0 github.com/moby/locker v1.0.1 github.com/moby/patternmatcher v0.6.0 @@ -116,6 +116,7 @@ require ( cloud.google.com/go/longrunning v0.4.1 // indirect github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect github.com/agext/levenshtein v1.2.3 // indirect + github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect @@ -134,10 +135,9 @@ require ( github.com/containerd/console v1.0.3 // indirect github.com/containerd/go-cni v1.1.9 // indirect github.com/containerd/go-runc v1.1.0 // indirect - github.com/containerd/nydus-snapshotter v0.3.1 // indirect + github.com/containerd/nydus-snapshotter v0.8.2 // indirect github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect github.com/containerd/ttrpc v1.2.2 // indirect - github.com/containerd/typeurl v1.0.2 // indirect github.com/containernetworking/cni v1.1.2 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect @@ -177,7 +177,7 @@ require ( github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect - github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f // indirect + github.com/spdx/tools-golang v0.5.1 // indirect 
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect github.com/tinylib/msgp v1.1.8 // indirect github.com/tonistiigi/fsutil v0.0.0-20230629203738-36ef4d8c0dbb // indirect diff --git a/vendor.sum b/vendor.sum index 44cf660fd8..53eb4aafc1 100644 --- a/vendor.sum +++ b/vendor.sum @@ -117,8 +117,10 @@ github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0 h1:cOjLyhBhe91glgZZNbQUg9BJC57l6BiSKov0Ivv7k0U= github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0/go.mod h1:fBaQWrftOD5CrVCUfoYGHs4X4VViTuGOXA8WloCjTY0= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.1.0 h1:Y2lUDsFKVRSYGojLJ1yLxSXdMmMYTYls0rCvoqmMUQk= github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= @@ -155,6 +157,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 
h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc= +github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -326,8 +330,8 @@ github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHr github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/go-runc v1.1.0 h1:OX4f+/i2y5sUT7LhmcJH7GYrjjhHa1QI4e8yO0gGleA= github.com/containerd/go-runc v1.1.0/go.mod h1:xJv2hFF7GvHtTJd9JqTS2UVxMkULUYw4JN5XAUZqH5U= -github.com/containerd/nydus-snapshotter v0.3.1 h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk= -github.com/containerd/nydus-snapshotter v0.3.1/go.mod h1:+8R7NX7vrjlxAgtidnsstwIhpzyTlriYPssTxH++uiM= +github.com/containerd/nydus-snapshotter v0.8.2 h1:7SOrMU2YmLzfbsr5J7liMZJlNi5WT6vtIOxLGv+iz7E= +github.com/containerd/nydus-snapshotter v0.8.2/go.mod h1:UJILTN5LVBRY+dt8BGJbp72Xy729hUZsOugObEI3/O8= github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4= github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= @@ -337,8 +341,6 @@ github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtO github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v1.0.1/go.mod 
h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4= github.com/containerd/typeurl/v2 v2.1.1/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= @@ -626,7 +628,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -906,8 +907,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mndrix/tap-go v0.0.0-20171203230836-629fa407e90b/go.mod h1:pzzDgJWZ34fGzaAZGFW22KVZDfyrYW+QABMrWnJBnSs= github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ= -github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54 h1:LSh03Csyx/zQq8MreC9MYMQE/+5EkohwZMvXSS6kMZo= -github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54/go.mod h1:bMQDryngJKGvJ/ZuRFhrejurbvYSv3NkGCheQ59X4AM= +github.com/moby/buildkit v0.12.2 h1:B7guBgY6sfk4dBlv/ORUxyYlp0UojYaYyATgtNwSCXc= +github.com/moby/buildkit v0.12.2/go.mod 
h1:adB4y0SxxX8trnrY+oEulb48ODLqPO6pKMF0ppGcCoI= github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ= github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= @@ -1159,8 +1160,8 @@ github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34c github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM= -github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f h1:9B623Cfs+mclYK6dsae7gLSwuIBHvlgmEup87qpqsAQ= -github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f/go.mod h1:VHzvNsKAfAGqs4ZvwRL+7a0dNsL20s7lGui4K9C0xQM= +github.com/spdx/tools-golang v0.5.1 h1:fJg3SVOGG+eIva9ZUBm/hvyA7PIPVFjRxUKe6fdAgwE= +github.com/spdx/tools-golang v0.5.1/go.mod h1:/DRDQuBfB37HctM29YtrX1v+bXiVmT2OpQDalRmX9aU= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -1201,7 +1202,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod 
h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1951,6 +1953,8 @@ k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +kernel.org/pub/linux/libs/security/libcap/cap v1.2.67 h1:sPQ9qlSNR26fToTKbxe/HDWJlXvBLqGmt84LGCQkOy0= +kernel.org/pub/linux/libs/security/libcap/psx v1.2.67 h1:NxbXJ7pDVq0FKBsqjieT92QDXI2XaqH2HAi4QcCOHt8= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= diff --git a/vendor/github.com/anchore/go-struct-converter/.bouncer.yaml b/vendor/github.com/anchore/go-struct-converter/.bouncer.yaml new file mode 100644 index 0000000000..db50b4d30f --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/.bouncer.yaml @@ -0,0 +1,10 @@ +permit: + - BSD.* + - CC0.* + - MIT.* + - Apache.* + - MPL.* + - ISC + - WTFPL + +ignore-packages: diff --git a/vendor/github.com/anchore/go-struct-converter/.gitignore b/vendor/github.com/anchore/go-struct-converter/.gitignore new file mode 100644 index 0000000000..1edd832da1 --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/.gitignore @@ -0,0 +1,30 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# 
https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work + +# tools +.tmp + +# test output +test/results + +# IDE project files +.idea diff --git a/vendor/github.com/anchore/go-struct-converter/.golangci.yaml b/vendor/github.com/anchore/go-struct-converter/.golangci.yaml new file mode 100644 index 0000000000..fdb37721db --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/.golangci.yaml @@ -0,0 +1,78 @@ +#issues: +# # The list of ids of default excludes to include or disable. +# include: +# - EXC0002 # disable excluding of issues about comments from golint + +linters: + # inverted configuration with `enable-all` and `disable` is not scalable during updates of golangci-lint + disable-all: true + enable: + - asciicheck + - bodyclose + - depguard + - dogsled + - dupl + - errcheck + - exportloopref + - funlen + - gocognit + - goconst + - gocritic + - gocyclo + - gofmt + - goprintffuncname + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - nolintlint + - revive + - staticcheck + - stylecheck + - typecheck + - unconvert + - unparam + - unused + - whitespace + +# do not enable... 
+# - gochecknoglobals +# - gochecknoinits # this is too aggressive +# - rowserrcheck disabled per generics https://github.com/golangci/golangci-lint/issues/2649 +# - godot +# - godox +# - goerr113 +# - goimports # we're using gosimports now instead to account for extra whitespaces (see https://github.com/golang/go/issues/20818) +# - golint # deprecated +# - gomnd # this is too aggressive +# - interfacer # this is a good idea, but is no longer supported and is prone to false positives +# - lll # without a way to specify per-line exception cases, this is not usable +# - maligned # this is an excellent linter, but tricky to optimize and we are not sensitive to memory layout optimizations +# - nestif +# - prealloc # following this rule isn't consistently a good idea, as it sometimes forces unnecessary allocations that result in less idiomatic code +# - scopelint # deprecated +# - testpackage +# - wsl # this doens't have an auto-fixer yet and is pretty noisy (https://github.com/bombsimon/wsl/issues/90) + +linters-settings: + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: 140 + # Checks the number of statements in a function. + # If lower than 0, disable the check. + # Default: 40 + statements: 100 + + gocognit: + # Minimal code complexity to report + # Default: 30 (but we recommend 10-20) + min-complexity: 80 + + gocyclo: + # Minimal code complexity to report. 
+ # Default: 30 (but we recommend 10-20) + min-complexity: 50 diff --git a/vendor/github.com/anchore/go-struct-converter/CONTRIBUTING.md b/vendor/github.com/anchore/go-struct-converter/CONTRIBUTING.md new file mode 100644 index 0000000000..9ff2670b2f --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/CONTRIBUTING.md @@ -0,0 +1,86 @@ +# Contributing to go-struct-converter + +If you are looking to contribute to this project and want to open a GitHub pull request ("PR"), there are a few guidelines of what we are looking for in patches. Make sure you go through this document and ensure that your code proposal is aligned. + +## Sign off your work + +The `sign-off` is an added line at the end of the explanation for the commit, certifying that you wrote it or otherwise have the right to submit it as an open-source patch. By submitting a contribution, you agree to be bound by the terms of the DCO Version 1.1 and Apache License Version 2.0. + +Signing off a commit certifies the below Developer's Certificate of Origin (DCO): + +```text +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + + (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + + (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + + (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ + (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +All contributions to this project are licensed under the [Apache License Version 2.0, January 2004](http://www.apache.org/licenses/). + +When committing your change, you can add the required line manually so that it looks like this: + +```text +Signed-off-by: John Doe +``` + +Alternatively, configure your Git client with your name and email to use the `-s` flag when creating a commit: + +```text +$ git config --global user.name "John Doe" +$ git config --global user.email "john.doe@example.com" +``` + +Creating a signed-off commit is then possible with `-s` or `--signoff`: + +```text +$ git commit -s -m "this is a commit message" +``` + +To double-check that the commit was signed-off, look at the log output: + +```text +$ git log -1 +commit 37ceh170e4hb283bb73d958f2036ee5k07e7fde7 (HEAD -> issue-35, origin/main, main) +Author: John Doe +Date: Mon Aug 1 11:27:13 2020 -0400 + + this is a commit message + + Signed-off-by: John Doe +``` + +[//]: # "TODO: Commit guidelines, granular commits" +[//]: # "TODO: Commit guidelines, descriptive messages" +[//]: # "TODO: Commit guidelines, commit title, extra body description" +[//]: # "TODO: PR title and description" + +## Test your changes + +Ensure that your changes have passed the test suite. + +Simply run `make test` to have all tests run and validate changes work properly. + +## Document your changes + +When proposed changes are modifying user-facing functionality or output, it is expected the PR will include updates to the documentation as well. 
diff --git a/vendor/github.com/containerd/typeurl/LICENSE b/vendor/github.com/anchore/go-struct-converter/LICENSE similarity index 93% rename from vendor/github.com/containerd/typeurl/LICENSE rename to vendor/github.com/anchore/go-struct-converter/LICENSE index 584149b6ee..261eeb9e9f 100644 --- a/vendor/github.com/containerd/typeurl/LICENSE +++ b/vendor/github.com/anchore/go-struct-converter/LICENSE @@ -1,7 +1,6 @@ - Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -176,13 +175,24 @@ END OF TERMS AND CONDITIONS - Copyright The containerd Authors + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/vendor/github.com/anchore/go-struct-converter/Makefile b/vendor/github.com/anchore/go-struct-converter/Makefile new file mode 100644 index 0000000000..f5412aef5c --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/Makefile @@ -0,0 +1,81 @@ +TEMPDIR = ./.tmp + +# commands and versions +LINTCMD = $(TEMPDIR)/golangci-lint run --tests=false --timeout=5m --config .golangci.yaml +GOIMPORTS_CMD = $(TEMPDIR)/gosimports -local github.com/anchore + +# tool versions +GOLANGCILINT_VERSION = v1.50.1 +GOSIMPORTS_VERSION = v0.3.4 +BOUNCER_VERSION = v0.4.0 + +# formatting variables +BOLD := $(shell tput -T linux bold) +PURPLE := $(shell tput -T linux setaf 5) +GREEN := $(shell tput -T linux setaf 2) +CYAN := $(shell tput -T linux setaf 6) +RED := $(shell tput -T linux setaf 1) +RESET := $(shell tput -T linux sgr0) +TITLE := $(BOLD)$(PURPLE) +SUCCESS := $(BOLD)$(GREEN) + +# test variables +RESULTSDIR = test/results +COVER_REPORT = $(RESULTSDIR)/unit-coverage-details.txt +COVER_TOTAL = $(RESULTSDIR)/unit-coverage-summary.txt +# the quality gate lower threshold for unit test total % coverage (by function statements) +COVERAGE_THRESHOLD := 80 + +$(RESULTSDIR): + mkdir -p $(RESULTSDIR) + +$(TEMPDIR): + mkdir -p $(TEMPDIR) + +.PHONY: bootstrap-tools +bootstrap-tools: $(TEMPDIR) + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(TEMPDIR)/ $(GOLANGCILINT_VERSION) + curl -sSfL https://raw.githubusercontent.com/wagoodman/go-bouncer/master/bouncer.sh | sh -s -- -b $(TEMPDIR)/ $(BOUNCER_VERSION) + # the only difference between goimports and gosimports is that gosimports removes extra whitespace between import blocks (see 
https://github.com/golang/go/issues/20818) + GOBIN="$(realpath $(TEMPDIR))" go install github.com/rinchsan/gosimports/cmd/gosimports@$(GOSIMPORTS_VERSION) + +.PHONY: static-analysis +static-analysis: check-licenses lint + +.PHONY: lint +lint: ## Run gofmt + golangci lint checks + $(call title,Running linters) + # ensure there are no go fmt differences + @printf "files with gofmt issues: [$(shell gofmt -l -s .)]\n" + @test -z "$(shell gofmt -l -s .)" + + # run all golangci-lint rules + $(LINTCMD) + @[ -z "$(shell $(GOIMPORTS_CMD) -d .)" ] || (echo "goimports needs to be fixed" && false) + + # go tooling does not play well with certain filename characters, ensure the common cases don't result in future "go get" failures + $(eval MALFORMED_FILENAMES := $(shell find . | grep -e ':')) + @bash -c "[[ '$(MALFORMED_FILENAMES)' == '' ]] || (printf '\nfound unsupported filename characters:\n$(MALFORMED_FILENAMES)\n\n' && false)" + +.PHONY: lint-fix +lint-fix: ## Auto-format all source code + run golangci lint fixers + $(call title,Running lint fixers) + gofmt -w -s . + $(GOIMPORTS_CMD) -w . + $(LINTCMD) --fix + go mod tidy + +.PHONY: check-licenses +check-licenses: ## Ensure transitive dependencies are compliant with the current license policy + $(TEMPDIR)/bouncer check ./... + +.PHONY: unit +unit: $(RESULTSDIR) ## Run unit tests (with coverage) + $(call title,Running unit tests) + go test -coverprofile $(COVER_REPORT) $(shell go list ./... 
| grep -v anchore/syft/test) + @go tool cover -func $(COVER_REPORT) | grep total | awk '{print substr($$3, 1, length($$3)-1)}' > $(COVER_TOTAL) + @echo "Coverage: $$(cat $(COVER_TOTAL))" + @if [ $$(echo "$$(cat $(COVER_TOTAL)) >= $(COVERAGE_THRESHOLD)" | bc -l) -ne 1 ]; then echo "$(RED)$(BOLD)Failed coverage quality gate (> $(COVERAGE_THRESHOLD)%)$(RESET)" && false; fi + +.PHONY: test +test: unit diff --git a/vendor/github.com/anchore/go-struct-converter/README.md b/vendor/github.com/anchore/go-struct-converter/README.md new file mode 100644 index 0000000000..06d8e4311e --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/README.md @@ -0,0 +1,166 @@ +# Go `struct` Converter + +A library for converting between Go structs. + +```go +chain := converter.NewChain(V1{}, V2{}, V3{}) + +chain.Convert(myV1struct, &myV3struct) +``` + +## Details + +At its core, this library provides a `Convert` function, which automatically +handles converting fields with the same name, and "convertable" +types. Some examples are: +* `string` -> `string` +* `string` -> `*string` +* `int` -> `string` +* `string` -> `[]string` + +The automatic conversions are implemented when there is an obvious way +to convert between the types. A lot more automatic conversions happen +-- see [the converter tests](converter_test.go) for a more comprehensive +list of what is currently supported. + +Not everything can be handled automatically, however, so there is also +a `ConvertFrom` interface any struct in the graph can implement to +perform custom conversion, similar to how the stdlib `MarshalJSON` and +`UnmarshalJSON` would be implemented. + +Additionally, and maybe most importantly, there is a `converter.Chain` available, +which orchestrates conversions between _multiple versions_ of structs. 
This could +be thought of similar to database migrations: given a starting struct and a target +struct, the `chain.Convert` function iterates through every intermediary migration +in order to arrive at the target struct. + +## Basic Usage + +To illustrate usage we'll start with a few basic structs, some of which +implement the `ConvertFrom` interface due to breaking changes: + +```go +// --------- V1 struct definition below --------- + +type V1 struct { + Name string + OldField string +} + +// --------- V2 struct definition below --------- + +type V2 struct { + Name string + NewField string // this was a renamed field +} + +func (to *V2) ConvertFrom(from interface{}) error { + if from, ok := from.(V1); ok { // forward migration + to.NewField = from.OldField + } + return nil +} + +// --------- V3 struct definition below --------- + +type V3 struct { + Name []string + FinalField []string // this field was renamed and the type was changed +} + +func (to *V3) ConvertFrom(from interface{}) error { + if from, ok := from.(V2); ok { // forward migration + to.FinalField = []string{from.NewField} + } + return nil +} +``` + +Given these type definitions, we can easily set up a conversion chain +like this: + +```go +chain := converter.NewChain(V1{}, V2{}, V3{}) +``` + +This chain can then be used to convert from an _older version_ to a _newer +version_. This is because our `ConvertFrom` definitions are only handling +_forward_ migrations. + +This chain can be used to convert from a `V1` struct to a `V3` struct easily, +like this: + +```go +v1 := // somehow get a populated v1 struct +v3 := V3{} +chain.Convert(v1, &v3) +``` + +Since we've defined our chain as `V1` → `V2` → `V3`, the chain will execute +conversions to all intermediary structs (`V2`, in this case) and ultimately end +when we've populated the `v3` instance. 
+ +Note we haven't needed to define any conversions on the `Name` field of any structs +since this one is convertible between structs: `string` → `string` → `[]string`. + +## Backwards Migrations + +If we wanted to _also_ provide backwards migrations, we could also easily add a case +to the `ConvertFrom` methods. The whole set of structs would look something like this: + + +```go +// --------- V1 struct definition below --------- + +type V1 struct { + Name string + OldField string +} + +func (to *V1) ConvertFrom(from interface{}) error { + if from, ok := from.(V2); ok { // backward migration + to.OldField = from.NewField + } + return nil +} + +// --------- V2 struct definition below --------- + +type V2 struct { + Name string + NewField string +} + +func (to *V2) ConvertFrom(from interface{}) error { + if from, ok := from.(V1); ok { // forward migration + to.NewField = from.OldField + } + if from, ok := from.(V3); ok { // backward migration + to.NewField = from.FinalField[0] + } + return nil +} + +// --------- V3 struct definition below --------- + +type V3 struct { + Name []string + FinalField []string +} + +func (to *V3) ConvertFrom(from interface{}) error { + if from, ok := from.(V2); ok { // forward migration + to.FinalField = []string{from.NewField} + } + return nil +} +``` + +At this point we could convert in either direction, for example a +`V3` struct could convert to a `V1` struct, with the caveat that there +may be data loss, as might need to happen due to changes in the data shapes. + +## Contributing + +If you would like to contribute to this repository, please see the +[CONTRIBUTING.md](CONTRIBUTING.md). 
diff --git a/vendor/github.com/anchore/go-struct-converter/chain.go b/vendor/github.com/anchore/go-struct-converter/chain.go new file mode 100644 index 0000000000..41aa0e1d7f --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/chain.go @@ -0,0 +1,95 @@ +package converter + +import ( + "fmt" + "reflect" +) + +// NewChain takes a set of structs, in order, to allow for accurate chain.Convert(from, &to) calls. NewChain should +// be called with struct values in a manner similar to this: +// converter.NewChain(v1.Document{}, v2.Document{}, v3.Document{}) +func NewChain(structs ...interface{}) Chain { + out := Chain{} + for _, s := range structs { + typ := reflect.TypeOf(s) + if isPtr(typ) { // these shouldn't be pointers, but check just to be safe + typ = typ.Elem() + } + out.Types = append(out.Types, typ) + } + return out +} + +// Chain holds a set of types with which to migrate through when a `chain.Convert` call is made +type Chain struct { + Types []reflect.Type +} + +// Convert converts from one type in the chain to the target type, calling each conversion in between +func (c Chain) Convert(from interface{}, to interface{}) (err error) { + fromValue := reflect.ValueOf(from) + fromType := fromValue.Type() + + // handle incoming pointers + for isPtr(fromType) { + fromValue = fromValue.Elem() + fromType = fromType.Elem() + } + + toValuePtr := reflect.ValueOf(to) + toTypePtr := toValuePtr.Type() + + if !isPtr(toTypePtr) { + return fmt.Errorf("TO struct provided not a pointer, unable to set values: %v", to) + } + + // toValue must be a pointer but need a reference to the struct type directly + toValue := toValuePtr.Elem() + toType := toValue.Type() + + fromIdx := -1 + toIdx := -1 + + for i, typ := range c.Types { + if typ == fromType { + fromIdx = i + } + if typ == toType { + toIdx = i + } + } + + if fromIdx == -1 { + return fmt.Errorf("invalid FROM type provided, not in the conversion chain: %s", fromType.Name()) + } + + if toIdx == -1 { + return 
fmt.Errorf("invalid TO type provided, not in the conversion chain: %s", toType.Name()) + } + + last := from + for i := fromIdx; i != toIdx; { + // skip the first index, because that is the from type - start with the next conversion in the chain + if fromIdx < toIdx { + i++ + } else { + i-- + } + + var next interface{} + if i == toIdx { + next = to + } else { + nextVal := reflect.New(c.Types[i]) + next = nextVal.Interface() // this will be a pointer, which is fine to pass to both from and to in Convert + } + + if err = Convert(last, next); err != nil { + return err + } + + last = next + } + + return nil +} diff --git a/vendor/github.com/anchore/go-struct-converter/converter.go b/vendor/github.com/anchore/go-struct-converter/converter.go new file mode 100644 index 0000000000..57d1b332db --- /dev/null +++ b/vendor/github.com/anchore/go-struct-converter/converter.go @@ -0,0 +1,334 @@ +package converter + +import ( + "fmt" + "reflect" + "strconv" +) + +// ConvertFrom interface allows structs to define custom conversion functions if the automated reflection-based Convert +// is not able to convert properties due to name changes or other factors. +type ConvertFrom interface { + ConvertFrom(interface{}) error +} + +// Convert takes two objects, e.g. v2_1.Document and &v2_2.Document{} and attempts to map all the properties from one +// to the other. After the automatic mapping, if a struct implements the ConvertFrom interface, this is called to +// perform any additional conversion logic necessary. 
+func Convert(from interface{}, to interface{}) error { + fromValue := reflect.ValueOf(from) + + toValuePtr := reflect.ValueOf(to) + toTypePtr := toValuePtr.Type() + + if !isPtr(toTypePtr) { + return fmt.Errorf("TO value provided was not a pointer, unable to set value: %v", to) + } + + toValue, err := getValue(fromValue, toTypePtr) + if err != nil { + return err + } + + // don't set nil values + if toValue == nilValue { + return nil + } + + // toValuePtr is the passed-in pointer, toValue is also the same type of pointer + toValuePtr.Elem().Set(toValue.Elem()) + return nil +} + +func getValue(fromValue reflect.Value, targetType reflect.Type) (reflect.Value, error) { + var err error + + fromType := fromValue.Type() + + var toValue reflect.Value + + // handle incoming pointer Types + if isPtr(fromType) { + if fromValue.IsNil() { + return nilValue, nil + } + fromValue = fromValue.Elem() + if !fromValue.IsValid() || fromValue.IsZero() { + return nilValue, nil + } + fromType = fromValue.Type() + } + + baseTargetType := targetType + if isPtr(targetType) { + baseTargetType = targetType.Elem() + } + + switch { + case isStruct(fromType) && isStruct(baseTargetType): + // this always creates a pointer type + toValue = reflect.New(baseTargetType) + toValue = toValue.Elem() + + for i := 0; i < fromType.NumField(); i++ { + fromField := fromType.Field(i) + fromFieldValue := fromValue.Field(i) + + toField, exists := baseTargetType.FieldByName(fromField.Name) + if !exists { + continue + } + toFieldType := toField.Type + + toFieldValue := toValue.FieldByName(toField.Name) + + newValue, err := getValue(fromFieldValue, toFieldType) + if err != nil { + return nilValue, err + } + + if newValue == nilValue { + continue + } + + toFieldValue.Set(newValue) + } + + // allow structs to implement a custom convert function from previous/next version struct + if reflect.PtrTo(baseTargetType).Implements(convertFromType) { + convertFrom := toValue.Addr().MethodByName(convertFromName) + if 
!convertFrom.IsValid() { + return nilValue, fmt.Errorf("unable to get ConvertFrom method") + } + args := []reflect.Value{fromValue} + out := convertFrom.Call(args) + err := out[0].Interface() + if err != nil { + return nilValue, fmt.Errorf("an error occurred calling %s.%s: %v", baseTargetType.Name(), convertFromName, err) + } + } + case isSlice(fromType) && isSlice(baseTargetType): + if fromValue.IsNil() { + return nilValue, nil + } + + length := fromValue.Len() + targetElementType := baseTargetType.Elem() + toValue = reflect.MakeSlice(baseTargetType, length, length) + for i := 0; i < length; i++ { + v, err := getValue(fromValue.Index(i), targetElementType) + if err != nil { + return nilValue, err + } + if v.IsValid() { + toValue.Index(i).Set(v) + } + } + case isMap(fromType) && isMap(baseTargetType): + if fromValue.IsNil() { + return nilValue, nil + } + + keyType := baseTargetType.Key() + elementType := baseTargetType.Elem() + toValue = reflect.MakeMap(baseTargetType) + for _, fromKey := range fromValue.MapKeys() { + fromVal := fromValue.MapIndex(fromKey) + k, err := getValue(fromKey, keyType) + if err != nil { + return nilValue, err + } + v, err := getValue(fromVal, elementType) + if err != nil { + return nilValue, err + } + if k == nilValue || v == nilValue { + continue + } + if v == nilValue { + continue + } + if k.IsValid() && v.IsValid() { + toValue.SetMapIndex(k, v) + } + } + default: + // TODO determine if there are other conversions + toValue = fromValue + } + + // handle non-pointer returns -- the reflect.New earlier always creates a pointer + if !isPtr(baseTargetType) { + toValue = fromPtr(toValue) + } + + toValue, err = convertValueTypes(toValue, baseTargetType) + + if err != nil { + return nilValue, err + } + + // handle elements which are now pointers + if isPtr(targetType) { + toValue = toPtr(toValue) + } + + return toValue, nil +} + +// convertValueTypes takes a value and a target type, and attempts to convert +// between the Types - e.g. 
string -> int. when this function is called the value +func convertValueTypes(value reflect.Value, targetType reflect.Type) (reflect.Value, error) { + typ := value.Type() + switch { + // if the Types are the same, just return the value + case typ.Kind() == targetType.Kind(): + return value, nil + case value.IsZero() && isPrimitive(targetType): + + case isPrimitive(typ) && isPrimitive(targetType): + // get a string representation of the value + str := fmt.Sprintf("%v", value.Interface()) // TODO is there a better way to get a string representation? + var err error + var out interface{} + switch { + case isString(targetType): + out = str + case isBool(targetType): + out, err = strconv.ParseBool(str) + case isInt(targetType): + out, err = strconv.Atoi(str) + case isUint(targetType): + out, err = strconv.ParseUint(str, 10, 64) + case isFloat(targetType): + out, err = strconv.ParseFloat(str, 64) + } + + if err != nil { + return nilValue, err + } + + v := reflect.ValueOf(out) + + v = v.Convert(targetType) + + return v, nil + case isSlice(typ) && isSlice(targetType): + // this should already be handled in getValue + case isSlice(typ): + // this may be lossy + if value.Len() > 0 { + v := value.Index(0) + v, err := convertValueTypes(v, targetType) + if err != nil { + return nilValue, err + } + return v, nil + } + return convertValueTypes(nilValue, targetType) + case isSlice(targetType): + elementType := targetType.Elem() + v, err := convertValueTypes(value, elementType) + if err != nil { + return nilValue, err + } + if v == nilValue { + return v, nil + } + slice := reflect.MakeSlice(targetType, 1, 1) + slice.Index(0).Set(v) + return slice, nil + } + + return nilValue, fmt.Errorf("unable to convert from: %v to %v", value.Interface(), targetType.Name()) +} + +func isPtr(typ reflect.Type) bool { + return typ.Kind() == reflect.Ptr +} + +func isPrimitive(typ reflect.Type) bool { + return isString(typ) || isBool(typ) || isInt(typ) || isUint(typ) || isFloat(typ) +} + +func 
isString(typ reflect.Type) bool { + return typ.Kind() == reflect.String +} + +func isBool(typ reflect.Type) bool { + return typ.Kind() == reflect.Bool +} + +func isInt(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64: + return true + } + return false +} + +func isUint(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64: + return true + } + return false +} + +func isFloat(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Float32, + reflect.Float64: + return true + } + return false +} + +func isStruct(typ reflect.Type) bool { + return typ.Kind() == reflect.Struct +} + +func isSlice(typ reflect.Type) bool { + return typ.Kind() == reflect.Slice +} + +func isMap(typ reflect.Type) bool { + return typ.Kind() == reflect.Map +} + +func toPtr(val reflect.Value) reflect.Value { + typ := val.Type() + if !isPtr(typ) { + // this creates a pointer type inherently + ptrVal := reflect.New(typ) + ptrVal.Elem().Set(val) + val = ptrVal + } + return val +} + +func fromPtr(val reflect.Value) reflect.Value { + if isPtr(val.Type()) { + val = val.Elem() + } + return val +} + +// convertFromName constant to find the ConvertFrom method +const convertFromName = "ConvertFrom" + +var ( + // nilValue is returned in a number of cases when a value should not be set + nilValue = reflect.ValueOf(nil) + + // convertFromType is the type to check for ConvertFrom implementations + convertFromType = reflect.TypeOf((*ConvertFrom)(nil)).Elem() +) diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go index b7b9f2a2b7..d590b48b6e 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go @@ -17,7 +17,6 @@ const ( 
LayerAnnotationNydusBlob = "containerd.io/snapshot/nydus-blob" LayerAnnotationNydusBlobDigest = "containerd.io/snapshot/nydus-blob-digest" LayerAnnotationNydusBlobSize = "containerd.io/snapshot/nydus-blob-size" - LayerAnnotationNydusBlobIDs = "containerd.io/snapshot/nydus-blob-ids" LayerAnnotationNydusBootstrap = "containerd.io/snapshot/nydus-bootstrap" LayerAnnotationNydusSourceChainID = "containerd.io/snapshot/nydus-source-chainid" diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go index dc0130aefe..fd9c49cda1 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go @@ -11,9 +11,10 @@ package converter import ( "archive/tar" + "bytes" "compress/gzip" "context" - "encoding/json" + "encoding/binary" "fmt" "io" "os" @@ -24,10 +25,12 @@ import ( "github.com/containerd/containerd/archive" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/converter" "github.com/containerd/containerd/labels" "github.com/containerd/fifo" + "github.com/klauspost/compress/zstd" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -35,11 +38,14 @@ import ( "golang.org/x/sync/errgroup" "github.com/containerd/nydus-snapshotter/pkg/converter/tool" - "github.com/containerd/nydus-snapshotter/pkg/errdefs" + "github.com/containerd/nydus-snapshotter/pkg/label" ) -const bootstrapNameInTar = "image.boot" -const blobNameInTar = "image.blob" +const EntryBlob = "image.blob" +const EntryBootstrap = "image.boot" +const EntryBlobMeta = "blob.meta" +const EntryBlobMetaHeader = "blob.meta.header" +const EntryTOC 
= "rafs.blob.toc" const envNydusBuilder = "NYDUS_BUILDER" const envNydusWorkDir = "NYDUS_WORKDIR" @@ -113,152 +119,190 @@ func unpackOciTar(ctx context.Context, dst string, reader io.Reader) error { return nil } -// Unpack a Nydus formatted tar stream into a directory. -func unpackNydusTar(ctx context.Context, bootDst, blobDst string, ra content.ReaderAt) error { +// unpackNydusBlob unpacks a Nydus formatted tar stream into a directory. +// unpackBlob indicates whether to unpack blob data. +func unpackNydusBlob(bootDst, blobDst string, ra content.ReaderAt, unpackBlob bool) error { boot, err := os.OpenFile(bootDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { return errors.Wrapf(err, "write to bootstrap %s", bootDst) } defer boot.Close() - if err = unpackBootstrapFromNydusTar(ctx, ra, boot); err != nil { + if _, err = UnpackEntry(ra, EntryBootstrap, boot); err != nil { return errors.Wrap(err, "unpack bootstrap from nydus") } - blob, err := os.OpenFile(blobDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - return errors.Wrapf(err, "write to blob %s", blobDst) - } - defer blob.Close() + if unpackBlob { + blob, err := os.OpenFile(blobDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return errors.Wrapf(err, "write to blob %s", blobDst) + } + defer blob.Close() - if err = unpackBlobFromNydusTar(ctx, ra, blob); err != nil { - return errors.Wrap(err, "unpack blob from nydus") + if _, err = UnpackEntry(ra, EntryBlob, blob); err != nil { + if errors.Is(err, ErrNotFound) { + // The nydus layer may contain only bootstrap and no blob + // data, which should be ignored. + return nil + } + return errors.Wrap(err, "unpack blob from nydus") + } } return nil } -// Unpack the bootstrap from nydus formatted tar stream (blob + bootstrap). 
-// The nydus formatted tar stream is a tar-like structure that arranges the -// data as follows: -// -// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` -func unpackBootstrapFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error { - cur := ra.Size() - reader := newSeekReader(ra) - +func seekFileByTarHeader(ra content.ReaderAt, targetName string, handle func(io.Reader, *tar.Header) error) error { const headerSize = 512 - // Seek from tail to head of nydus formatted tar stream to find nydus - // bootstrap data. - for { - if headerSize > cur { - return fmt.Errorf("invalid tar format at pos %d", cur) - } - - // Try to seek to the part of tar header. - var err error - cur, err = reader.Seek(cur-headerSize, io.SeekCurrent) - if err != nil { - return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize) - } - - tr := tar.NewReader(reader) - // Parse tar header. - hdr, err := tr.Next() - if err != nil { - return errors.Wrap(err, "parse tar header") - } - - if hdr.Name == bootstrapNameInTar { - // Try to seek to the part of tar data (bootstrap_data). - if hdr.Size > cur { - return fmt.Errorf("invalid tar format at pos %d", cur) - } - bootstrapOffset := cur - hdr.Size - _, err = reader.Seek(bootstrapOffset, io.SeekStart) - if err != nil { - return errors.Wrap(err, "seek to bootstrap data offset") - } - - // Copy tar data (bootstrap_data) to provided target writer. - if _, err := io.CopyN(target, reader, hdr.Size); err != nil { - return errors.Wrap(err, "copy bootstrap data to reader") - } - - return nil - } - - if cur == hdr.Size { - break - } + if headerSize > ra.Size() { + return fmt.Errorf("invalid nydus tar size %d", ra.Size()) } - return fmt.Errorf("can't find bootstrap in nydus tar") -} - -// Unpack the blob from nydus formatted tar stream (blob + bootstrap). 
-// The nydus formatted tar stream is a tar-like structure that arranges the -// data as follows: -// -// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` -func unpackBlobFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error { - cur := ra.Size() + cur := ra.Size() - headerSize reader := newSeekReader(ra) - const headerSize = 512 - - // Seek from tail to head of nydus formatted tar stream to find nydus - // bootstrap data. + // Seek from tail to head of nydus formatted tar stream to find + // target data. for { - if headerSize > cur { - break - } - - // Try to seek to the part of tar header. - var err error - cur, err = reader.Seek(cur-headerSize, io.SeekStart) + // Try to seek the part of tar header. + _, err := reader.Seek(cur, io.SeekStart) if err != nil { - return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize) + return errors.Wrapf(err, "seek %d for nydus tar header", cur) } - tr := tar.NewReader(reader) // Parse tar header. + tr := tar.NewReader(reader) hdr, err := tr.Next() if err != nil { - return errors.Wrap(err, "parse tar header") + return errors.Wrap(err, "parse nydus tar header") } - if hdr.Name == bootstrapNameInTar { - if hdr.Size > cur { - return fmt.Errorf("invalid tar format at pos %d", cur) - } - cur, err = reader.Seek(cur-hdr.Size, io.SeekStart) - if err != nil { - return errors.Wrap(err, "seek to bootstrap data offset") - } - } else if hdr.Name == blobNameInTar { - if hdr.Size > cur { - return fmt.Errorf("invalid tar format at pos %d", cur) - } + if cur < hdr.Size { + return fmt.Errorf("invalid nydus tar data, name %s, size %d", hdr.Name, hdr.Size) + } + + if hdr.Name == targetName { + // Try to seek the part of tar data. 
_, err = reader.Seek(cur-hdr.Size, io.SeekStart) if err != nil { - return errors.Wrap(err, "seek to blob data offset") + return errors.Wrap(err, "seek target data offset") } - if _, err := io.CopyN(target, reader, hdr.Size); err != nil { - return errors.Wrap(err, "copy blob data to reader") + dataReader := io.NewSectionReader(reader, cur-hdr.Size, hdr.Size) + + if err := handle(dataReader, hdr); err != nil { + return errors.Wrap(err, "handle target data") } + return nil } + + cur = cur - hdr.Size - headerSize + if cur < 0 { + break + } } - return nil + return errors.Wrapf(ErrNotFound, "can't find target %s by seeking tar", targetName) +} + +func seekFileByTOC(ra content.ReaderAt, targetName string, handle func(io.Reader, *tar.Header) error) (*TOCEntry, error) { + entrySize := 128 + var tocEntry *TOCEntry + + err := seekFileByTarHeader(ra, EntryTOC, func(tocEntryDataReader io.Reader, _ *tar.Header) error { + entryData, err := io.ReadAll(tocEntryDataReader) + if err != nil { + return errors.Wrap(err, "read toc entries") + } + if len(entryData)%entrySize != 0 { + return fmt.Errorf("invalid entries length %d", len(entryData)) + } + + count := len(entryData) / entrySize + for i := 0; i < count; i++ { + var entry TOCEntry + r := bytes.NewReader(entryData[i*entrySize : i*entrySize+entrySize]) + if err := binary.Read(r, binary.LittleEndian, &entry); err != nil { + return errors.Wrap(err, "read toc entries") + } + if entry.GetName() == targetName { + compressor, err := entry.GetCompressor() + if err != nil { + return errors.Wrap(err, "get compressor of entry") + } + compressedOffset := int64(entry.GetCompressedOffset()) + compressedSize := int64(entry.GetCompressedSize()) + sr := io.NewSectionReader(ra, compressedOffset, compressedSize) + + var rd io.Reader + switch compressor { + case CompressorZstd: + decoder, err := zstd.NewReader(sr) + if err != nil { + return errors.Wrap(err, "seek to target data offset") + } + defer decoder.Close() + rd = decoder + case 
CompressorNone: + rd = sr + default: + return fmt.Errorf("unsupported compressor %x", compressor) + } + + if err := handle(rd, nil); err != nil { + return errors.Wrap(err, "handle target entry data") + } + + tocEntry = &entry + + return nil + } + } + + return errors.Wrapf(ErrNotFound, "can't find target %s by seeking TOC", targetName) + }) + + return tocEntry, err +} + +// Unpack the file from nydus formatted tar stream. +// The nydus formatted tar stream is a tar-like structure that arranges the +// data as follows: +// +// `data | tar_header | ... | data | tar_header | [toc_entry | ... | toc_entry | tar_header]` +func UnpackEntry(ra content.ReaderAt, targetName string, target io.Writer) (*TOCEntry, error) { + handle := func(dataReader io.Reader, _ *tar.Header) error { + // Copy data to provided target writer. + if _, err := io.Copy(target, dataReader); err != nil { + return errors.Wrap(err, "copy target data to reader") + } + + return nil + } + + return seekFile(ra, targetName, handle) +} + +func seekFile(ra content.ReaderAt, targetName string, handle func(io.Reader, *tar.Header) error) (*TOCEntry, error) { + // Try seek target data by TOC. + entry, err := seekFileByTOC(ra, targetName, handle) + if err != nil { + if !errors.Is(err, ErrNotFound) { + return nil, errors.Wrap(err, "seek file by TOC") + } + } else { + return entry, nil + } + + // Seek target data by tar header, ensure compatible with old rafs blob format. + return nil, seekFileByTarHeader(ra, targetName, handle) } // Pack converts an OCI tar stream to nydus formatted stream with a tar-like // structure that arranges the data as follows: // -// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` +// `data | tar_header | data | tar_header | [toc_entry | ... 
| toc_entry | tar_header]` // // The caller should write OCI tar stream into the returned `io.WriteCloser`, // then the Pack method will write the nydus formatted stream to `dest` @@ -267,6 +311,24 @@ func unpackBlobFromNydusTar(ctx context.Context, ra content.ReaderAt, target io. // Important: the caller must check `io.WriteCloser.Close() == nil` to ensure // the conversion workflow is finished. func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) { + if opt.FsVersion == "" { + opt.FsVersion = "6" + } + + builderPath := getBuilder(opt.BuilderPath) + opt.features = tool.DetectFeatures(builderPath, []tool.Feature{tool.FeatureTar2Rafs}) + + if opt.OCIRef { + if opt.FsVersion == "6" { + return packFromTar(ctx, dest, opt) + } + return nil, fmt.Errorf("oci ref can only be supported by fs version 6") + } + + if opt.features.Contains(tool.FeatureTar2Rafs) { + return packFromTar(ctx, dest, opt) + } + workDir, err := ensureWorkDir(opt.WorkDir) if err != nil { return nil, errors.Wrap(err, "ensure work directory") @@ -295,9 +357,7 @@ func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, }() wc := newWriteCloser(pw, func() error { - defer func() { - os.RemoveAll(workDir) - }() + defer os.RemoveAll(workDir) // Because PipeWriter#Close is called does not mean that the PipeReader // has finished reading all the data, and unpack may not be complete yet, @@ -313,15 +373,19 @@ func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, go func() { err := tool.Pack(tool.PackOption{ - BuilderPath: getBuilder(opt.BuilderPath), + BuilderPath: builderPath, BlobPath: blobPath, FsVersion: opt.FsVersion, SourcePath: sourceDir, ChunkDictPath: opt.ChunkDictPath, PrefetchPatterns: opt.PrefetchPatterns, + AlignedChunk: opt.AlignedChunk, + ChunkSize: opt.ChunkSize, Compressor: opt.Compressor, Timeout: opt.Timeout, + + Features: opt.features, }) if err != nil { pw.CloseWithError(errors.Wrapf(err, "convert blob for 
%s", sourceDir)) @@ -341,6 +405,117 @@ func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, return wc, nil } +func packFromTar(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) { + workDir, err := ensureWorkDir(opt.WorkDir) + if err != nil { + return nil, errors.Wrap(err, "ensure work directory") + } + defer func() { + if err != nil { + os.RemoveAll(workDir) + } + }() + + rafsBlobPath := filepath.Join(workDir, "blob.rafs") + rafsBlobFifo, err := fifo.OpenFifo(ctx, rafsBlobPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644) + if err != nil { + return nil, errors.Wrapf(err, "create fifo file") + } + + tarBlobPath := filepath.Join(workDir, "blob.targz") + tarBlobFifo, err := fifo.OpenFifo(ctx, tarBlobPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_NONBLOCK, 0644) + if err != nil { + defer rafsBlobFifo.Close() + return nil, errors.Wrapf(err, "create fifo file") + } + + pr, pw := io.Pipe() + eg := errgroup.Group{} + + wc := newWriteCloser(pw, func() error { + defer os.RemoveAll(workDir) + if err := eg.Wait(); err != nil { + return errors.Wrapf(err, "convert nydus ref") + } + return nil + }) + + eg.Go(func() error { + defer tarBlobFifo.Close() + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(tarBlobFifo, pr, *buffer); err != nil { + return errors.Wrapf(err, "copy targz to fifo") + } + return nil + }) + + eg.Go(func() error { + defer rafsBlobFifo.Close() + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(dest, rafsBlobFifo, *buffer); err != nil { + return errors.Wrapf(err, "copy blob meta fifo to nydus blob") + } + return nil + }) + + eg.Go(func() error { + var err error + if opt.OCIRef { + err = tool.Pack(tool.PackOption{ + BuilderPath: getBuilder(opt.BuilderPath), + + OCIRef: opt.OCIRef, + BlobPath: rafsBlobPath, + SourcePath: tarBlobPath, + Timeout: opt.Timeout, + + Features: opt.features, + }) + } else { + err = 
tool.Pack(tool.PackOption{ + BuilderPath: getBuilder(opt.BuilderPath), + + BlobPath: rafsBlobPath, + FsVersion: opt.FsVersion, + SourcePath: tarBlobPath, + ChunkDictPath: opt.ChunkDictPath, + PrefetchPatterns: opt.PrefetchPatterns, + AlignedChunk: opt.AlignedChunk, + ChunkSize: opt.ChunkSize, + BatchSize: opt.BatchSize, + Compressor: opt.Compressor, + Timeout: opt.Timeout, + + Features: opt.features, + }) + } + if err != nil { + // Without handling the returned error because we just only + // focus on the command exit status in `tool.Pack`. + wc.Close() + } + return errors.Wrapf(err, "call builder") + }) + + return wc, nil +} + +func calcBlobTOCDigest(ra content.ReaderAt) (*digest.Digest, error) { + digester := digest.Canonical.Digester() + if err := seekFileByTarHeader(ra, EntryTOC, func(tocData io.Reader, _ *tar.Header) error { + if _, err := io.Copy(digester.Hash(), tocData); err != nil { + return errors.Wrap(err, "calc toc data and header digest") + } + return nil + }); err != nil { + return nil, err + } + tocDigest := digester.Digest() + return &tocDigest, nil +} + // Merge multiple nydus bootstraps (from each layer of image) to a final // bootstrap. 
And due to the possibility of enabling the `ChunkDictPath` // option causes the data deduplication, it will return the actual blob @@ -352,22 +527,40 @@ func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) } defer os.RemoveAll(workDir) - eg, ctx := errgroup.WithContext(ctx) + getBootstrapPath := func(layerIdx int) string { + digestHex := layers[layerIdx].Digest.Hex() + if originalDigest := layers[layerIdx].OriginalDigest; originalDigest != nil { + return filepath.Join(workDir, originalDigest.Hex()) + } + return filepath.Join(workDir, digestHex) + } + + eg, _ := errgroup.WithContext(ctx) sourceBootstrapPaths := []string{} + rafsBlobDigests := []string{} + rafsBlobSizes := []int64{} + rafsBlobTOCDigests := []string{} for idx := range layers { - sourceBootstrapPaths = append(sourceBootstrapPaths, filepath.Join(workDir, layers[idx].Digest.Hex())) + sourceBootstrapPaths = append(sourceBootstrapPaths, getBootstrapPath(idx)) + if layers[idx].OriginalDigest != nil { + rafsBlobDigests = append(rafsBlobDigests, layers[idx].Digest.Hex()) + rafsBlobSizes = append(rafsBlobSizes, layers[idx].ReaderAt.Size()) + rafsBlobTOCDigest, err := calcBlobTOCDigest(layers[idx].ReaderAt) + if err != nil { + return nil, errors.Wrapf(err, "calc blob toc digest for layer %s", layers[idx].Digest) + } + rafsBlobTOCDigests = append(rafsBlobTOCDigests, rafsBlobTOCDigest.Hex()) + } eg.Go(func(idx int) func() error { return func() error { - layer := layers[idx] - // Use the hex hash string of whole tar blob as the bootstrap name. 
- bootstrap, err := os.Create(filepath.Join(workDir, layer.Digest.Hex())) + bootstrap, err := os.Create(getBootstrapPath(idx)) if err != nil { return errors.Wrap(err, "create source bootstrap") } defer bootstrap.Close() - if err := unpackBootstrapFromNydusTar(ctx, layer.ReaderAt, bootstrap); err != nil { + if _, err := UnpackEntry(layers[idx].ReaderAt, EntryBootstrap, bootstrap); err != nil { return errors.Wrap(err, "unpack nydus tar") } @@ -386,11 +579,16 @@ func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) BuilderPath: getBuilder(opt.BuilderPath), SourceBootstrapPaths: sourceBootstrapPaths, - TargetBootstrapPath: targetBootstrapPath, - ChunkDictPath: opt.ChunkDictPath, - PrefetchPatterns: opt.PrefetchPatterns, - OutputJSONPath: filepath.Join(workDir, "merge-output.json"), - Timeout: opt.Timeout, + RafsBlobDigests: rafsBlobDigests, + RafsBlobSizes: rafsBlobSizes, + RafsBlobTOCDigests: rafsBlobTOCDigests, + + TargetBootstrapPath: targetBootstrapPath, + ChunkDictPath: opt.ChunkDictPath, + ParentBootstrapPath: opt.ParentBootstrapPath, + PrefetchPatterns: opt.PrefetchPatterns, + OutputJSONPath: filepath.Join(workDir, "merge-output.json"), + Timeout: opt.Timeout, }) if err != nil { return nil, errors.Wrap(err, "merge bootstrap") @@ -399,7 +597,7 @@ func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) var rc io.ReadCloser if opt.WithTar { - rc, err = packToTar(targetBootstrapPath, fmt.Sprintf("image/%s", bootstrapNameInTar), false) + rc, err = packToTar(targetBootstrapPath, fmt.Sprintf("image/%s", EntryBootstrap), false) if err != nil { return nil, errors.Wrap(err, "pack bootstrap to tar") } @@ -428,8 +626,8 @@ func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt Unpack } defer os.RemoveAll(workDir) - bootPath, blobPath := filepath.Join(workDir, bootstrapNameInTar), filepath.Join(workDir, blobNameInTar) - if err = unpackNydusTar(ctx, bootPath, blobPath, ra); err != nil { + bootPath, blobPath 
:= filepath.Join(workDir, EntryBootstrap), filepath.Join(workDir, EntryBlob) + if err = unpackNydusBlob(bootPath, blobPath, ra, !opt.Stream); err != nil { return errors.Wrap(err, "unpack nydus tar") } @@ -440,16 +638,35 @@ func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt Unpack } defer blobFifo.Close() + unpackOpt := tool.UnpackOption{ + BuilderPath: getBuilder(opt.BuilderPath), + BootstrapPath: bootPath, + BlobPath: blobPath, + TarPath: tarPath, + Timeout: opt.Timeout, + } + + if opt.Stream { + proxy, err := setupContentStoreProxy(opt.WorkDir, ra) + if err != nil { + return errors.Wrap(err, "new content store proxy") + } + defer proxy.close() + + // generate backend config file + backendConfigStr := fmt.Sprintf(`{"version":2,"backend":{"type":"http-proxy","http-proxy":{"addr":"%s"}}}`, proxy.socketPath) + backendConfigPath := filepath.Join(workDir, "backend-config.json") + if err := os.WriteFile(backendConfigPath, []byte(backendConfigStr), 0644); err != nil { + return errors.Wrap(err, "write backend config") + } + unpackOpt.BlobPath = "" + unpackOpt.BackendConfigPath = backendConfigPath + } + unpackErrChan := make(chan error) go func() { defer close(unpackErrChan) - err := tool.Unpack(tool.UnpackOption{ - BuilderPath: getBuilder(opt.BuilderPath), - BootstrapPath: bootPath, - BlobPath: blobPath, - TarPath: tarPath, - Timeout: opt.Timeout, - }) + err := tool.Unpack(unpackOpt) if err != nil { blobFifo.Close() unpackErrChan <- err @@ -476,11 +693,11 @@ func IsNydusBlobAndExists(ctx context.Context, cs content.Store, desc ocispec.De return false } - return IsNydusBlob(ctx, desc) + return IsNydusBlob(desc) } -// IsNydusBlob returns true when the specified descriptor is nydus blob format. -func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool { +// IsNydusBlob returns true when the specified descriptor is nydus blob layer. 
+func IsNydusBlob(desc ocispec.Descriptor) bool { if desc.Annotations == nil { return false } @@ -489,6 +706,16 @@ func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool { return hasAnno } +// IsNydusBootstrap returns true when the specified descriptor is nydus bootstrap layer. +func IsNydusBootstrap(desc ocispec.Descriptor) bool { + if desc.Annotations == nil { + return false + } + + _, hasAnno := desc.Annotations[LayerAnnotationNydusBootstrap] + return hasAnno +} + // LayerConvertFunc returns a function which converts an OCI image layer to // a nydus blob layer, and set the media type to "application/vnd.oci.image.layer.nydus.blob.v1". func LayerConvertFunc(opt PackOption) converter.ConvertFunc { @@ -497,6 +724,11 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc { return nil, nil } + // Skip the conversion of nydus layer. + if IsNydusBlob(desc) || IsNydusBootstrap(desc) { + return nil, nil + } + ra, err := cs.ReaderAt(ctx, desc) if err != nil { return nil, errors.Wrap(err, "get source blob reader") @@ -511,9 +743,14 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc { } defer dst.Close() - tr, err := compression.DecompressStream(rdr) - if err != nil { - return nil, errors.Wrap(err, "decompress blob stream") + var tr io.ReadCloser + if opt.OCIRef { + tr = io.NopCloser(rdr) + } else { + tr, err = compression.DecompressStream(rdr) + if err != nil { + return nil, errors.Wrap(err, "decompress blob stream") + } } digester := digest.SHA256.Digester() @@ -574,14 +811,12 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc { }, } - if opt.Backend != nil { - blobRa, err := cs.ReaderAt(ctx, newDesc) - if err != nil { - return nil, errors.Wrap(err, "get nydus blob reader") - } - defer blobRa.Close() + if opt.OCIRef { + newDesc.Annotations[label.NydusRefLayer] = desc.Digest.String() + } - if err := opt.Backend.Push(ctx, blobRa, blobDigest); err != nil { + if opt.Backend != nil { + if err := opt.Backend.Push(ctx, cs, newDesc); 
err != nil { return nil, errors.Wrap(err, "push to storage backend") } } @@ -595,11 +830,15 @@ func LayerConvertFunc(opt PackOption) converter.ConvertFunc { // the index conversion and the manifest conversion. func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc { return func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) { + // If the previous conversion did not occur, the `newDesc` may be nil. + if newDesc == nil { + return &orgDesc, nil + } switch { case images.IsIndexType(newDesc.MediaType): return convertIndex(ctx, cs, orgDesc, newDesc) case images.IsManifestType(newDesc.MediaType): - return convertManifest(ctx, cs, newDesc, opt) + return convertManifest(ctx, cs, orgDesc, newDesc, opt) default: return newDesc, nil } @@ -636,6 +875,13 @@ func convertIndex(ctx context.Context, cs content.Store, orgDesc ocispec.Descrip manifest.Platform.OSFeatures = append(manifest.Platform.OSFeatures, ManifestOSFeatureNydus) index.Manifests[i] = manifest } + + // If the converted manifest list contains only one manifest, + // convert it directly to manifest. + if len(index.Manifests) == 1 { + return &index.Manifests[0], nil + } + // Update image index in content store. newIndexDesc, err := writeJSON(ctx, cs, index, *newDesc, indexLabels) if err != nil { @@ -644,10 +890,23 @@ func convertIndex(ctx context.Context, cs content.Store, orgDesc ocispec.Descrip return newIndexDesc, nil } +// isNydusImage checks if the last layer is nydus bootstrap, +// so that we can ensure it is a nydus image. +func isNydusImage(manifest *ocispec.Manifest) bool { + layers := manifest.Layers + if len(layers) != 0 { + desc := layers[len(layers)-1] + if IsNydusBootstrap(desc) { + return true + } + } + return false +} + // convertManifest merges all the nydus blob layers into a // nydus bootstrap layer, update the image config, // and modify the image manifest. 
-func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) { +func convertManifest(ctx context.Context, cs content.Store, oldDesc ocispec.Descriptor, newDesc *ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) { var manifest ocispec.Manifest manifestDesc := *newDesc manifestLabels, err := readJSON(ctx, cs, &manifest, manifestDesc) @@ -655,14 +914,21 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des return nil, errors.Wrap(err, "read manifest json") } + if isNydusImage(&manifest) { + return &manifestDesc, nil + } + + // This option needs to be enabled for image scenario. + opt.WithTar = true + + // If the original image is already an OCI type, we should forcibly set the + // bootstrap layer to the OCI type. + if !opt.OCI && oldDesc.MediaType == ocispec.MediaTypeImageManifest { + opt.OCI = true + } + // Append bootstrap layer to manifest. - bootstrapDesc, blobDescs, err := MergeLayers(ctx, cs, manifest.Layers, MergeOption{ - BuilderPath: opt.BuilderPath, - WorkDir: opt.WorkDir, - ChunkDictPath: opt.ChunkDictPath, - FsVersion: opt.FsVersion, - WithTar: true, - }) + bootstrapDesc, blobDescs, err := MergeLayers(ctx, cs, manifest.Layers, opt) if err != nil { return nil, errors.Wrap(err, "merge nydus layers") } @@ -678,7 +944,8 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des // Affected by chunk dict, the blob list referenced by final bootstrap // are from different layers, part of them are from original layers, part // from chunk dict bootstrap, so we need to rewrite manifest's layers here. 
- manifest.Layers = append(blobDescs, *bootstrapDesc) + blobDescs := append(blobDescs, *bootstrapDesc) + manifest.Layers = blobDescs } // Update the gc label of bootstrap layer @@ -691,8 +958,13 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des if err != nil { return nil, errors.Wrap(err, "read image config") } + bootstrapHistory := ocispec.History{ + CreatedBy: "Nydus Converter", + Comment: "Nydus Bootstrap Layer", + } if opt.Backend != nil { config.RootFS.DiffIDs = []digest.Digest{digest.Digest(bootstrapDesc.Annotations[LayerAnnotationUncompressed])} + config.History = []ocispec.History{bootstrapHistory} } else { config.RootFS.DiffIDs = make([]digest.Digest, 0, len(manifest.Layers)) for i, layer := range manifest.Layers { @@ -700,6 +972,9 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des // Remove useless annotation. delete(manifest.Layers[i].Annotations, LayerAnnotationUncompressed) } + // Append history item for bootstrap layer, to ensure the history consistency. + // See https://github.com/distribution/distribution/blob/e5d5810851d1f17a5070e9b6f940d8af98ea3c29/manifest/schema1/config_builder.go#L136 + config.History = append(config.History, bootstrapHistory) } // Update image config in content store. newConfigDesc, err := writeJSON(ctx, cs, config, manifest.Config, configLabels) @@ -710,6 +985,11 @@ func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Des // Update the config gc label manifestLabels[configGCLabelKey] = newConfigDesc.Digest.String() + // Associate a reference to the original OCI manifest. + // See the `subject` field description in + // https://github.com/opencontainers/image-spec/blob/main/manifest.md#image-manifest-property-descriptions + manifest.Subject = &oldDesc + // Update image manifest in content store. 
newManifestDesc, err := writeJSON(ctx, cs, manifest, manifestDesc, manifestLabels) if err != nil { @@ -726,33 +1006,45 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript layers := []Layer{} var chainID digest.Digest - for _, blobDesc := range descs { - ra, err := cs.ReaderAt(ctx, blobDesc) + nydusBlobDigests := []digest.Digest{} + for _, nydusBlobDesc := range descs { + ra, err := cs.ReaderAt(ctx, nydusBlobDesc) if err != nil { - return nil, nil, errors.Wrapf(err, "get reader for blob %q", blobDesc.Digest) + return nil, nil, errors.Wrapf(err, "get reader for blob %q", nydusBlobDesc.Digest) } defer ra.Close() + var originalDigest *digest.Digest + if opt.OCIRef { + digestStr := nydusBlobDesc.Annotations[label.NydusRefLayer] + _originalDigest, err := digest.Parse(digestStr) + if err != nil { + return nil, nil, errors.Wrapf(err, "invalid label %s=%s", label.NydusRefLayer, digestStr) + } + originalDigest = &_originalDigest + } layers = append(layers, Layer{ - Digest: blobDesc.Digest, - ReaderAt: ra, + Digest: nydusBlobDesc.Digest, + OriginalDigest: originalDigest, + ReaderAt: ra, }) if chainID == "" { - chainID = identity.ChainID([]digest.Digest{blobDesc.Digest}) + chainID = identity.ChainID([]digest.Digest{nydusBlobDesc.Digest}) } else { - chainID = identity.ChainID([]digest.Digest{chainID, blobDesc.Digest}) + chainID = identity.ChainID([]digest.Digest{chainID, nydusBlobDesc.Digest}) } + nydusBlobDigests = append(nydusBlobDigests, nydusBlobDesc.Digest) } // Merge all nydus bootstraps into a final nydus bootstrap. 
pr, pw := io.Pipe() - blobDigestChan := make(chan []digest.Digest, 1) + originalBlobDigestChan := make(chan []digest.Digest, 1) go func() { defer pw.Close() - blobDigests, err := Merge(ctx, layers, pw, opt) + originalBlobDigests, err := Merge(ctx, layers, pw, opt) if err != nil { pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) } - blobDigestChan <- blobDigests + originalBlobDigestChan <- originalBlobDigests }() // Compress final nydus bootstrap to tar.gz and write into content store. @@ -791,10 +1083,17 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript return nil, nil, errors.Wrap(err, "get info from content store") } - blobDigests := <-blobDigestChan + originalBlobDigests := <-originalBlobDigestChan blobDescs := []ocispec.Descriptor{} - blobIDs := []string{} - for _, blobDigest := range blobDigests { + + var blobDigests []digest.Digest + if opt.OCIRef { + blobDigests = nydusBlobDigests + } else { + blobDigests = originalBlobDigests + } + + for idx, blobDigest := range blobDigests { blobInfo, err := cs.Info(ctx, blobDigest) if err != nil { return nil, nil, errors.Wrap(err, "get info from content store") @@ -808,30 +1107,29 @@ func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descript LayerAnnotationNydusBlob: "true", }, } + if opt.OCIRef { + blobDesc.Annotations[label.NydusRefLayer] = layers[idx].OriginalDigest.String() + } blobDescs = append(blobDescs, blobDesc) - blobIDs = append(blobIDs, blobDigest.Hex()) - } - - blobIDsBytes, err := json.Marshal(blobIDs) - if err != nil { - return nil, nil, errors.Wrap(err, "marshal blob ids") } if opt.FsVersion == "" { - opt.FsVersion = "5" + opt.FsVersion = "6" + } + mediaType := images.MediaTypeDockerSchema2LayerGzip + if opt.OCI { + mediaType = ocispec.MediaTypeImageLayerGzip } bootstrapDesc := ocispec.Descriptor{ Digest: compressedDgst, Size: bootstrapInfo.Size, - MediaType: ocispec.MediaTypeImageLayerGzip, + MediaType: mediaType, Annotations: 
map[string]string{ LayerAnnotationUncompressed: uncompressedDgst.Digest().String(), LayerAnnotationFSVersion: opt.FsVersion, // Use this annotation to identify nydus bootstrap layer. LayerAnnotationNydusBootstrap: "true", - // Track all blob digests for nydus snapshotter. - LayerAnnotationNydusBlobIDs: string(blobIDsBytes), }, } diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go new file mode 100644 index 0000000000..43c8e02287 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/cs_proxy_unix.go @@ -0,0 +1,168 @@ +//go:build !windows +// +build !windows + +/* + * Copyright (c) 2023. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "archive/tar" + "context" + "fmt" + "io" + "net" + "net/http" + "os" + "strconv" + "strings" + + "github.com/containerd/containerd/content" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type contentStoreProxy struct { + socketPath string + server *http.Server +} + +func setupContentStoreProxy(workDir string, ra content.ReaderAt) (*contentStoreProxy, error) { + sockP, err := os.CreateTemp(workDir, "nydus-cs-proxy-*.sock") + if err != nil { + return nil, errors.Wrap(err, "create unix socket file") + } + if err := os.Remove(sockP.Name()); err != nil { + return nil, err + } + listener, err := net.Listen("unix", sockP.Name()) + if err != nil { + return nil, errors.Wrap(err, "listen unix socket when setup content store proxy") + } + + server := &http.Server{ + Handler: contentProxyHandler(ra), + } + + go func() { + if err := server.Serve(listener); err != nil && err != http.ErrServerClosed { + logrus.WithError(err).Warn("serve content store proxy") + } + }() + + return &contentStoreProxy{ + socketPath: sockP.Name(), + server: server, + }, nil +} + +func (p *contentStoreProxy) close() error { + defer 
os.Remove(p.socketPath) + if err := p.server.Shutdown(context.Background()); err != nil { + return errors.Wrap(err, "shutdown content store proxy") + } + return nil +} + +func parseRangeHeader(rangeStr string, totalLen int64) (start, wantedLen int64, err error) { + rangeList := strings.Split(rangeStr, "-") + start, err = strconv.ParseInt(rangeList[0], 10, 64) + if err != nil { + err = errors.Wrap(err, "parse range header") + return + } + if len(rangeList) == 2 { + var end int64 + end, err = strconv.ParseInt(rangeList[1], 10, 64) + if err != nil { + err = errors.Wrap(err, "parse range header") + return + } + wantedLen = end - start + 1 + } else { + wantedLen = totalLen - start + } + if start < 0 || start >= totalLen || wantedLen <= 0 { + err = fmt.Errorf("invalid range header: %s", rangeStr) + return + } + return +} + +func contentProxyHandler(ra content.ReaderAt) http.Handler { + var ( + dataReader io.Reader + curPos int64 + + tarHeader *tar.Header + totalLen int64 + ) + resetReader := func() { + // TODO: Handle error? + _, _ = seekFile(ra, EntryBlob, func(reader io.Reader, hdr *tar.Header) error { + dataReader, tarHeader = reader, hdr + return nil + }) + curPos = 0 + } + + resetReader() + if tarHeader != nil { + totalLen = tarHeader.Size + } else { + totalLen = ra.Size() + } + handler := func(w http.ResponseWriter, r *http.Request) { + switch r.Method { + case http.MethodHead: + { + w.Header().Set("Content-Length", strconv.FormatInt(totalLen, 10)) + w.Header().Set("Content-Type", "application/octet-stream") + return + } + case http.MethodGet: + { + start, wantedLen, err := parseRangeHeader(strings.TrimPrefix(r.Header.Get("Range"), "bytes="), totalLen) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + // TODO: Handle error? 
+ _, _ = w.Write([]byte(err.Error())) + return + } + + // we need to make sure that the dataReader is at the right position + if start < curPos { + resetReader() + } + if start > curPos { + _, err = io.CopyN(io.Discard, dataReader, start-curPos) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + // TODO: Handle error? + _, _ = w.Write([]byte(err.Error())) + return + } + curPos = start + } + // then, the curPos must be equal to start + + readLen, err := io.CopyN(w, dataReader, wantedLen) + if err != nil && !errors.Is(err, io.EOF) { + w.WriteHeader(http.StatusInternalServerError) + // TODO: Handle error? + _, _ = w.Write([]byte(err.Error())) + return + } + curPos += readLen + w.Header().Set("Content-Length", strconv.FormatInt(readLen, 10)) + w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, start+readLen-1, totalLen)) + w.Header().Set("Content-Type", "application/octet-stream") + return + } + } + } + return http.HandlerFunc(handler) +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go index 55e98cc097..5c8284e95a 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go @@ -10,12 +10,11 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "os" "os/exec" "strings" "time" - "github.com/containerd/nydus-snapshotter/pkg/errdefs" "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -23,6 +22,10 @@ import ( var logger = logrus.WithField("module", "builder") +func isSignalKilled(err error) bool { + return strings.Contains(err.Error(), "signal: killed") +} + type PackOption struct { BuilderPath string @@ -33,35 +36,47 @@ type PackOption struct { ChunkDictPath string PrefetchPatterns string Compressor string + OCIRef bool + AlignedChunk bool + ChunkSize string + 
BatchSize string Timeout *time.Duration + + Features Features } type MergeOption struct { BuilderPath string SourceBootstrapPaths []string - TargetBootstrapPath string - ChunkDictPath string - PrefetchPatterns string - OutputJSONPath string - Timeout *time.Duration + RafsBlobDigests []string + RafsBlobTOCDigests []string + RafsBlobSizes []int64 + + TargetBootstrapPath string + ChunkDictPath string + ParentBootstrapPath string + PrefetchPatterns string + OutputJSONPath string + Timeout *time.Duration } type UnpackOption struct { - BuilderPath string - BootstrapPath string - BlobPath string - TarPath string - Timeout *time.Duration + BuilderPath string + BootstrapPath string + BlobPath string + BackendConfigPath string + TarPath string + Timeout *time.Duration } type outputJSON struct { Blobs []string } -func Pack(option PackOption) error { +func buildPackArgs(option PackOption) []string { if option.FsVersion == "" { - option.FsVersion = "5" + option.FsVersion = "6" } args := []string{ @@ -72,14 +87,37 @@ func Pack(option PackOption) error { "fs", "--blob", option.BlobPath, - "--source-type", - "directory", "--whiteout-spec", "none", "--fs-version", option.FsVersion, - "--inline-bootstrap", } + + if option.Features.Contains(FeatureTar2Rafs) { + args = append( + args, + "--type", + "tar-rafs", + "--blob-inline-meta", + ) + if option.FsVersion == "6" { + args = append( + args, + "--features", + "blob-toc", + ) + } + } else { + args = append( + args, + "--source-type", + "directory", + // Sames with `--blob-inline-meta`, it's used for compatibility + // with the old nydus-image builder. 
+ "--inline-bootstrap", + ) + } + if option.ChunkDictPath != "" { args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath)) } @@ -89,6 +127,65 @@ func Pack(option PackOption) error { if option.Compressor != "" { args = append(args, "--compressor", option.Compressor) } + if option.AlignedChunk { + args = append(args, "--aligned-chunk") + } + if option.ChunkSize != "" { + args = append(args, "--chunk-size", option.ChunkSize) + } + if option.BatchSize != "" { + args = append(args, "--batch-size", option.BatchSize) + } + args = append(args, option.SourcePath) + + return args +} + +func Pack(option PackOption) error { + if option.OCIRef { + return packRef(option) + } + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + + args := buildPackArgs(option) + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) 
+ cmd.Stdout = logger.Writer() + cmd.Stderr = logger.Writer() + cmd.Stdin = strings.NewReader(option.PrefetchPatterns) + + if err := cmd.Run(); err != nil { + if isSignalKilled(err) && option.Timeout != nil { + logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) + } else { + logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) + } + return err + } + + return nil +} + +func packRef(option PackOption) error { + args := []string{ + "create", + "--log-level", + "warn", + "--type", + "targz-ref", + "--blob-inline-meta", + "--features", + "blob-toc", + "--blob", + option.BlobPath, + } args = append(args, option.SourcePath) ctx := context.Background() @@ -98,15 +195,14 @@ func Pack(option PackOption) error { defer cancel() } - logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " ")) cmd := exec.CommandContext(ctx, option.BuilderPath, args...) cmd.Stdout = logger.Writer() cmd.Stderr = logger.Writer() - cmd.Stdin = strings.NewReader(option.PrefetchPatterns) if err := cmd.Run(); err != nil { - if errdefs.IsSignalKilled(err) && option.Timeout != nil { + if isSignalKilled(err) && option.Timeout != nil { logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) } else { logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) @@ -132,10 +228,26 @@ func Merge(option MergeOption) ([]digest.Digest, error) { if option.ChunkDictPath != "" { args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath)) } + if option.ParentBootstrapPath != "" { + args = append(args, "--parent-bootstrap", option.ParentBootstrapPath) + } if option.PrefetchPatterns == "" { option.PrefetchPatterns = "/" } args = append(args, option.SourceBootstrapPaths...) 
+ if len(option.RafsBlobDigests) > 0 { + args = append(args, "--blob-digests", strings.Join(option.RafsBlobDigests, ",")) + } + if len(option.RafsBlobTOCDigests) > 0 { + args = append(args, "--blob-toc-digests", strings.Join(option.RafsBlobTOCDigests, ",")) + } + if len(option.RafsBlobSizes) > 0 { + sizes := []string{} + for _, size := range option.RafsBlobSizes { + sizes = append(sizes, fmt.Sprintf("%d", size)) + } + args = append(args, "--blob-sizes", strings.Join(sizes, ",")) + } ctx := context.Background() var cancel context.CancelFunc @@ -143,7 +255,7 @@ func Merge(option MergeOption) ([]digest.Digest, error) { ctx, cancel = context.WithTimeout(ctx, *option.Timeout) defer cancel() } - logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " ")) cmd := exec.CommandContext(ctx, option.BuilderPath, args...) cmd.Stdout = logger.Writer() @@ -151,7 +263,7 @@ func Merge(option MergeOption) ([]digest.Digest, error) { cmd.Stdin = strings.NewReader(option.PrefetchPatterns) if err := cmd.Run(); err != nil { - if errdefs.IsSignalKilled(err) && option.Timeout != nil { + if isSignalKilled(err) && option.Timeout != nil { logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) } else { logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) @@ -159,7 +271,7 @@ func Merge(option MergeOption) ([]digest.Digest, error) { return nil, errors.Wrap(err, "run merge command") } - outputBytes, err := ioutil.ReadFile(option.OutputJSONPath) + outputBytes, err := os.ReadFile(option.OutputJSONPath) if err != nil { return nil, errors.Wrapf(err, "read file %s", option.OutputJSONPath) } @@ -187,7 +299,10 @@ func Unpack(option UnpackOption) error { "--output", option.TarPath, } - if option.BlobPath != "" { + + if option.BackendConfigPath != "" { + args = append(args, "--backend-config", 
option.BackendConfigPath) + } else if option.BlobPath != "" { args = append(args, "--blob", option.BlobPath) } @@ -198,14 +313,14 @@ func Unpack(option UnpackOption) error { defer cancel() } - logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args, " ")) cmd := exec.CommandContext(ctx, option.BuilderPath, args...) cmd.Stdout = logger.Writer() cmd.Stderr = logger.Writer() if err := cmd.Run(); err != nil { - if errdefs.IsSignalKilled(err) && option.Timeout != nil { + if isSignalKilled(err) && option.Timeout != nil { logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) } else { logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/feature.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/feature.go new file mode 100644 index 0000000000..f20c11b73c --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/feature.go @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2023. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package tool + +import ( + "context" + "os" + "os/exec" + "regexp" + "sync" + + "github.com/sirupsen/logrus" + "golang.org/x/mod/semver" +) + +const envNydusDisableTar2Rafs = "NYDUS_DISABLE_TAR2RAFS" + +var currentVersion string +var currentVersionDetectOnce sync.Once +var disableTar2Rafs = os.Getenv(envNydusDisableTar2Rafs) != "" + +const ( + // The option `--type tar-rafs` enables converting OCI tar blob + // stream into nydus blob directly, the tar2rafs eliminates the + // need to decompress it to a local directory first, thus greatly + // accelerating the pack process. 
+ FeatureTar2Rafs Feature = "--type tar-rafs" +) + +var featureMap = map[Feature]string{ + FeatureTar2Rafs: "v2.2", +} + +type Feature string +type Features []Feature + +func (features *Features) Contains(feature Feature) bool { + for _, feat := range *features { + if feat == feature { + return true + } + } + return false +} + +func (features *Features) Remove(feature Feature) { + found := -1 + for idx, feat := range *features { + if feat == feature { + found = idx + break + } + } + if found != -1 { + *features = append((*features)[:found], (*features)[found+1:]...) + } +} + +func detectVersion(msg []byte) string { + re := regexp.MustCompile(`Version:\s*v*(\d+.\d+.\d+)`) + matches := re.FindSubmatch(msg) + if len(matches) > 1 { + return string(matches[1]) + } + return "" +} + +// DetectFeatures returns supported feature list from required feature list. +func DetectFeatures(builder string, required Features) Features { + currentVersionDetectOnce.Do(func() { + if required.Contains(FeatureTar2Rafs) && disableTar2Rafs { + logrus.Warnf("the feature '%s' is disabled by env '%s'", FeatureTar2Rafs, envNydusDisableTar2Rafs) + } + + cmd := exec.CommandContext(context.Background(), builder, "--version") + output, err := cmd.Output() + if err != nil { + return + } + + currentVersion = detectVersion(output) + }) + + if currentVersion == "" { + return Features{} + } + + detectedFeatures := Features{} + for _, feature := range required { + requiredVersion := featureMap[feature] + if requiredVersion == "" { + detectedFeatures = append(detectedFeatures, feature) + continue + } + + // The feature is supported by current version + supported := semver.Compare(requiredVersion, "v"+currentVersion) <= 0 + if supported { + // It is an experimental feature, so we still provide an env + // variable to allow users to disable it. 
+ if feature == FeatureTar2Rafs && disableTar2Rafs { + continue + } + detectedFeatures = append(detectedFeatures, feature) + } + } + + return detectedFeatures +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go index 9d0590a0c9..bbdea72ed2 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go @@ -8,24 +8,41 @@ package converter import ( "context" + "errors" + "fmt" + "strings" "time" "github.com/containerd/containerd/content" + "github.com/containerd/nydus-snapshotter/pkg/converter/tool" "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type Compressor = uint32 + +const ( + CompressorNone Compressor = 0x0001 + CompressorZstd Compressor = 0x0002 +) + +var ( + ErrNotFound = errors.New("data not found") ) type Layer struct { // Digest represents the hash of whole tar blob. Digest digest.Digest + // Digest represents the original OCI tar(.gz) blob. + OriginalDigest *digest.Digest // ReaderAt holds the reader of whole tar blob. ReaderAt content.ReaderAt } -// Backend uploads blobs generated by nydus-image builder to a backend storage such as: -// - oss: A object storage backend, which uses its SDK to upload blob file. +// Backend uploads blobs generated by nydus-image builder to a backend storage. type Backend interface { // Push pushes specified blob file to remote storage backend. - Push(ctx context.Context, ra content.ReaderAt, blobDigest digest.Digest) error + Push(ctx context.Context, cs content.Store, desc ocispec.Descriptor) error // Check checks whether a blob exists in remote storage backend, // blob exists -> return (blobPath, nil) // blob not exists -> return ("", err) @@ -40,7 +57,7 @@ type PackOption struct { // BuilderPath holds the path of `nydus-image` binary tool. 
BuilderPath string // FsVersion specifies nydus RAFS format version, possible - // values: `5`, `6` (EROFS-compatible), default is `5`. + // values: `5`, `6` (EROFS-compatible), default is `6`. FsVersion string // ChunkDictPath holds the bootstrap path of chunk dict image. ChunkDictPath string @@ -48,10 +65,22 @@ type PackOption struct { PrefetchPatterns string // Compressor specifies nydus blob compression algorithm. Compressor string + // OCIRef enables converting OCI tar(.gz) blob to nydus referenced blob. + OCIRef bool + // AlignedChunk aligns uncompressed data chunks to 4K, only for RAFS V5. + AlignedChunk bool + // ChunkSize sets the size of data chunks, must be power of two and between 0x1000-0x1000000. + ChunkSize string + // BacthSize sets the size of batch data chunks, must be power of two and between 0x1000-0x1000000 or zero. + BatchSize string // Backend uploads blobs generated by nydus-image builder to a backend storage. Backend Backend // Timeout cancels execution once exceed the specified time. Timeout *time.Duration + + // Features keeps a feature list supported by newer version of builder, + // It is detected automatically, so don't export it. + features tool.Features } type MergeOption struct { @@ -60,14 +89,20 @@ type MergeOption struct { // BuilderPath holds the path of `nydus-image` binary tool. BuilderPath string // FsVersion specifies nydus RAFS format version, possible - // values: `5`, `6` (EROFS-compatible), default is `5`. + // values: `5`, `6` (EROFS-compatible), default is `6`. FsVersion string // ChunkDictPath holds the bootstrap path of chunk dict image. ChunkDictPath string + // ParentBootstrapPath holds the bootstrap path of parent image. + ParentBootstrapPath string // PrefetchPatterns holds file path pattern list want to prefetch. PrefetchPatterns string // WithTar puts bootstrap into a tar stream (no gzip). WithTar bool + // OCI converts docker media types to OCI media types. 
+ OCI bool + // OCIRef enables converting OCI tar(.gz) blob to nydus referenced blob. + OCIRef bool // Backend uploads blobs generated by nydus-image builder to a backend storage. Backend Backend // Timeout cancels execution once exceed the specified time. @@ -81,4 +116,62 @@ type UnpackOption struct { BuilderPath string // Timeout cancels execution once exceed the specified time. Timeout *time.Duration + // Stream enables streaming mode, which doesn't unpack the blob data to disk, + // but setup a http server to serve the blob data. + Stream bool +} + +type TOCEntry struct { + // Feature flags of entry + Flags uint32 + Reserved1 uint32 + // Name of entry data + Name [16]byte + // Sha256 of uncompressed entry data + UncompressedDigest [32]byte + // Offset of compressed entry data + CompressedOffset uint64 + // Size of compressed entry data + CompressedSize uint64 + // Size of uncompressed entry data + UncompressedSize uint64 + Reserved2 [44]byte +} + +func (entry *TOCEntry) GetCompressor() (Compressor, error) { + switch { + case entry.Flags&CompressorNone == CompressorNone: + return CompressorNone, nil + case entry.Flags&CompressorZstd == CompressorZstd: + return CompressorZstd, nil + } + return 0, fmt.Errorf("unsupported compressor, entry flags %x", entry.Flags) +} + +func (entry *TOCEntry) GetName() string { + var name strings.Builder + name.Grow(16) + for _, c := range entry.Name { + if c == 0 { + break + } + fmt.Fprintf(&name, "%c", c) + } + return name.String() +} + +func (entry *TOCEntry) GetUncompressedDigest() string { + return fmt.Sprintf("%x", entry.UncompressedDigest) +} + +func (entry *TOCEntry) GetCompressedOffset() uint64 { + return entry.CompressedOffset +} + +func (entry *TOCEntry) GetCompressedSize() uint64 { + return entry.CompressedSize +} + +func (entry *TOCEntry) GetUncompressedSize() uint64 { + return entry.UncompressedSize } diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go 
b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go index 849d870b34..b0b04d8b5f 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go @@ -59,18 +59,20 @@ type seekReader struct { func (ra *seekReader) Read(p []byte) (int, error) { n, err := ra.ReaderAt.ReadAt(p, ra.pos) - ra.pos += int64(len(p)) + ra.pos += int64(n) return n, err } func (ra *seekReader) Seek(offset int64, whence int) (int64, error) { - if whence == io.SeekCurrent { + switch { + case whence == io.SeekCurrent: ra.pos += offset - } else if whence == io.SeekStart { + case whence == io.SeekStart: ra.pos = offset - } else { + default: return 0, fmt.Errorf("unsupported whence %d", whence) } + return ra.pos, nil } @@ -126,11 +128,12 @@ func packToTar(src string, name string, compress bool) (io.ReadCloser, error) { var finalErr error // Return the first error encountered to the other end and ignore others. 
- if err != nil { + switch { + case err != nil: finalErr = err - } else if err1 != nil { + case err1 != nil: finalErr = err1 - } else if err2 != nil { + case err2 != nil: finalErr = err2 } @@ -168,6 +171,9 @@ func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec return nil, err } labels := info.Labels + if labels == nil { + labels = map[string]string{} + } b, err := content.ReadBlob(ctx, cs, desc) if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go index 3bdf74cb9d..0676fcdedd 100644 --- a/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go @@ -9,17 +9,19 @@ package errdefs import ( stderrors "errors" "net" - "strings" "syscall" + "github.com/containerd/containerd/errdefs" "github.com/pkg/errors" ) -const signalKilled = "signal: killed" - var ( - ErrAlreadyExists = errors.New("already exists") - ErrNotFound = errors.New("not found") + ErrAlreadyExists = errdefs.ErrAlreadyExists + ErrNotFound = errdefs.ErrNotFound + ErrInvalidArgument = errors.New("invalid argument") + ErrUnavailable = errors.New("unavailable") + ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented + ErrDeviceBusy = errors.New("device busy") // represents the device or resource is busy ) // IsAlreadyExists returns true if the error is due to already exists @@ -32,11 +34,6 @@ func IsNotFound(err error) bool { return errors.Is(err, ErrNotFound) } -// IsSignalKilled returns true if the error is signal killed -func IsSignalKilled(err error) bool { - return strings.Contains(err.Error(), signalKilled) -} - // IsConnectionClosed returns true if error is due to connection closed // this is used when snapshotter closed by sig term func IsConnectionClosed(err error) bool { diff --git 
a/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go new file mode 100644 index 0000000000..af9417bce0 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2020. Ant Group. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package label + +import ( + snpkg "github.com/containerd/containerd/pkg/snapshotters" +) + +// For package compatibility, we still keep the old exported name here. +var AppendLabelsHandlerWrapper = snpkg.AppendInfoHandlerWrapper + +// For package compatibility, we still keep the old exported name here. +const ( + CRIImageRef = snpkg.TargetRefLabel + CRIImageLayers = snpkg.TargetImageLayersLabel + CRILayerDigest = snpkg.TargetLayerDigestLabel + CRIManifestDigest = snpkg.TargetManifestDigestLabel +) + +const ( + // Marker for remote snapshotter to handle the pull request. + // During image pull, the containerd client calls Prepare API with the label containerd.io/snapshot.ref. + // This is a containerd-defined label which contains ChainID that targets a committed snapshot that the + // client is trying to prepare. + TargetSnapshotRef = "containerd.io/snapshot.ref" + + // A bool flag to mark the blob as a Nydus data blob, set by image builders. + NydusDataLayer = "containerd.io/snapshot/nydus-blob" + // A bool flag to mark the blob as a nydus bootstrap, set by image builders. + NydusMetaLayer = "containerd.io/snapshot/nydus-bootstrap" + // The referenced blob sha256 in format of `sha256:xxx`, set by image builders. + NydusRefLayer = "containerd.io/snapshot/nydus-ref" + // Annotation containing secret to pull images from registry, set by the snapshotter. + NydusImagePullSecret = "containerd.io/snapshot/pullsecret" + // Annotation containing username to pull images from registry, set by the snapshotter. 
+ NydusImagePullUsername = "containerd.io/snapshot/pullusername" + // A bool flag to enable integrity verification of meta data blob + NydusSignature = "containerd.io/snapshot/nydus-signature" + + // A bool flag to mark the blob as a estargz data blob, set by the snapshotter. + StargzLayer = "containerd.io/snapshot/stargz" + + // volatileOpt is a key of an optional label to each snapshot. + // If this optional label of a snapshot is specified, when mounted to rootdir + // this snapshot will include volatile option + OverlayfsVolatileOpt = "containerd.io/snapshot/overlay.volatile" +) + +func IsNydusDataLayer(labels map[string]string) bool { + _, ok := labels[NydusDataLayer] + return ok +} + +func IsNydusMetaLayer(labels map[string]string) bool { + if labels == nil { + return false + } + _, ok := labels[NydusMetaLayer] + return ok +} diff --git a/vendor/github.com/containerd/typeurl/.gitignore b/vendor/github.com/containerd/typeurl/.gitignore deleted file mode 100644 index d53846778b..0000000000 --- a/vendor/github.com/containerd/typeurl/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.test -coverage.txt diff --git a/vendor/github.com/containerd/typeurl/README.md b/vendor/github.com/containerd/typeurl/README.md deleted file mode 100644 index d021e96724..0000000000 --- a/vendor/github.com/containerd/typeurl/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# typeurl - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl) -[![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl) - -A Go package for managing the registration, marshaling, and unmarshaling of 
encoded types. - -This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto). - -## Project details - -**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/typeurl/doc.go b/vendor/github.com/containerd/typeurl/doc.go deleted file mode 100644 index c0d0fd2053..0000000000 --- a/vendor/github.com/containerd/typeurl/doc.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -// Package typeurl assists with managing the registration, marshaling, and -// unmarshaling of types encoded as protobuf.Any. -// -// A protobuf.Any is a proto message that can contain any arbitrary data. 
It -// consists of two components, a TypeUrl and a Value, and its proto definition -// looks like this: -// -// message Any { -// string type_url = 1; -// bytes value = 2; -// } -// -// The TypeUrl is used to distinguish the contents from other proto.Any -// messages. This typeurl library manages these URLs to enable automagic -// marshaling and unmarshaling of the contents. -// -// For example, consider this go struct: -// -// type Foo struct { -// Field1 string -// Field2 string -// } -// -// To use typeurl, types must first be registered. This is typically done in -// the init function -// -// func init() { -// typeurl.Register(&Foo{}, "Foo") -// } -// -// This will register the type Foo with the url path "Foo". The arguments to -// Register are variadic, and are used to construct a url path. Consider this -// example, from the github.com/containerd/containerd/client package: -// -// func init() { -// const prefix = "types.containerd.io" -// // register TypeUrls for commonly marshaled external types -// major := strconv.Itoa(specs.VersionMajor) -// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec") -// // this function has more Register calls, which are elided. -// } -// -// This registers several types under a more complex url, which ends up mapping -// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other -// value for major). -// -// Once a type is registered, it can be marshaled to a proto.Any message simply -// by calling `MarshalAny`, like this: -// -// foo := &Foo{Field1: "value1", Field2: "value2"} -// anyFoo, err := typeurl.MarshalAny(foo) -// -// MarshalAny will resolve the correct URL for the type. If the type in -// question implements the proto.Message interface, then it will be marshaled -// as a proto message. Otherwise, it will be marshaled as json. 
This means that -// typeurl will work on any arbitrary data, whether or not it has a proto -// definition, as long as it can be serialized to json. -// -// To unmarshal, the process is simply inverse: -// -// iface, err := typeurl.UnmarshalAny(anyFoo) -// foo := iface.(*Foo) -// -// The correct type is automatically chosen from the type registry, and the -// returned interface can be cast straight to that type. diff --git a/vendor/github.com/containerd/typeurl/types.go b/vendor/github.com/containerd/typeurl/types.go deleted file mode 100644 index 647d419a29..0000000000 --- a/vendor/github.com/containerd/typeurl/types.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -import ( - "encoding/json" - "path" - "reflect" - "sync" - - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - "github.com/pkg/errors" -) - -var ( - mu sync.RWMutex - registry = make(map[reflect.Type]string) -) - -// Definitions of common error types used throughout typeurl. -// -// These error types are used with errors.Wrap and errors.Wrapf to add context -// to an error. -// -// To detect an error class, use errors.Is() functions to tell whether an -// error is of this type. -var ( - ErrNotFound = errors.New("not found") -) - -// Register a type with a base URL for JSON marshaling. When the MarshalAny and -// UnmarshalAny functions are called they will treat the Any type value as JSON. 
-// To use protocol buffers for handling the Any value the proto.Register -// function should be used instead of this function. -func Register(v interface{}, args ...string) { - var ( - t = tryDereference(v) - p = path.Join(args...) - ) - mu.Lock() - defer mu.Unlock() - if et, ok := registry[t]; ok { - if et != p { - panic(errors.Errorf("type registered with alternate path %q != %q", et, p)) - } - return - } - registry[t] = p -} - -// TypeURL returns the type url for a registered type. -func TypeURL(v interface{}) (string, error) { - mu.RLock() - u, ok := registry[tryDereference(v)] - mu.RUnlock() - if !ok { - // fallback to the proto registry if it is a proto message - pb, ok := v.(proto.Message) - if !ok { - return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v)) - } - return proto.MessageName(pb), nil - } - return u, nil -} - -// Is returns true if the type of the Any is the same as v. -func Is(any *types.Any, v interface{}) bool { - // call to check that v is a pointer - tryDereference(v) - url, err := TypeURL(v) - if err != nil { - return false - } - return any.TypeUrl == url -} - -// MarshalAny marshals the value v into an any with the correct TypeUrl. -// If the provided object is already a proto.Any message, then it will be -// returned verbatim. If it is of type proto.Message, it will be marshaled as a -// protocol buffer. Otherwise, the object will be marshaled to json. -func MarshalAny(v interface{}) (*types.Any, error) { - var marshal func(v interface{}) ([]byte, error) - switch t := v.(type) { - case *types.Any: - // avoid reserializing the type if we have an any. 
- return t, nil - case proto.Message: - marshal = func(v interface{}) ([]byte, error) { - return proto.Marshal(t) - } - default: - marshal = json.Marshal - } - - url, err := TypeURL(v) - if err != nil { - return nil, err - } - - data, err := marshal(v) - if err != nil { - return nil, err - } - return &types.Any{ - TypeUrl: url, - Value: data, - }, nil -} - -// UnmarshalAny unmarshals the any type into a concrete type. -func UnmarshalAny(any *types.Any) (interface{}, error) { - return UnmarshalByTypeURL(any.TypeUrl, any.Value) -} - -// UnmarshalByTypeURL unmarshals the given type and value to into a concrete type. -func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) { - return unmarshal(typeURL, value, nil) -} - -// UnmarshalTo unmarshals the any type into a concrete type passed in the out -// argument. It is identical to UnmarshalAny, but lets clients provide a -// destination type through the out argument. -func UnmarshalTo(any *types.Any, out interface{}) error { - return UnmarshalToByTypeURL(any.TypeUrl, any.Value, out) -} - -// UnmarshalTo unmarshals the given type and value into a concrete type passed -// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients -// provide a destination type through the out argument. 
-func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error { - _, err := unmarshal(typeURL, value, out) - return err -} - -func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { - t, err := getTypeByUrl(typeURL) - if err != nil { - return nil, err - } - - if v == nil { - v = reflect.New(t.t).Interface() - } else { - // Validate interface type provided by client - vURL, err := TypeURL(v) - if err != nil { - return nil, err - } - if typeURL != vURL { - return nil, errors.Errorf("can't unmarshal type %q to output %q", typeURL, vURL) - } - } - - if t.isProto { - err = proto.Unmarshal(value, v.(proto.Message)) - } else { - err = json.Unmarshal(value, v) - } - - return v, err -} - -type urlType struct { - t reflect.Type - isProto bool -} - -func getTypeByUrl(url string) (urlType, error) { - mu.RLock() - for t, u := range registry { - if u == url { - mu.RUnlock() - return urlType{ - t: t, - }, nil - } - } - mu.RUnlock() - // fallback to proto registry - t := proto.MessageType(url) - if t != nil { - return urlType{ - // get the underlying Elem because proto returns a pointer to the type - t: t.Elem(), - isProto: true, - }, nil - } - return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url) -} - -func tryDereference(v interface{}) reflect.Type { - t := reflect.TypeOf(v) - if t.Kind() == reflect.Ptr { - // require check of pointer but dereference to register - return t.Elem() - } - panic("v is not a pointer to a type") -} diff --git a/vendor/github.com/moby/buildkit/cache/blobs.go b/vendor/github.com/moby/buildkit/cache/blobs.go index 716be90934..33e9693f19 100644 --- a/vendor/github.com/moby/buildkit/cache/blobs.go +++ b/vendor/github.com/moby/buildkit/cache/blobs.go @@ -11,6 +11,7 @@ import ( "github.com/containerd/containerd/leases" "github.com/containerd/containerd/mount" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" 
"github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/winlayers" @@ -18,11 +19,11 @@ import ( imagespecidentity "github.com/opencontainers/image-spec/identity" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) -var g flightcontrol.Group +var g flightcontrol.Group[struct{}] +var gFileList flightcontrol.Group[[]string] const containerdUncompressed = "containerd.io/uncompressed" @@ -86,12 +87,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if _, ok := filter[sr.ID()]; ok { eg.Go(func() error { - _, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (interface{}, error) { + _, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (struct{}, error) { if sr.getBlob() != "" { - return nil, nil + return struct{}{}, nil } if !createIfNeeded { - return nil, errors.WithStack(ErrNoBlobs) + return struct{}{}, errors.WithStack(ErrNoBlobs) } compressorFunc, finalize := comp.Type.Compress(ctx, comp) @@ -108,12 +109,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if lowerRef != nil { m, err := lowerRef.Mount(ctx, true, s) if err != nil { - return nil, err + return struct{}{}, err } var release func() error lower, release, err = m.Mount() if err != nil { - return nil, err + return struct{}{}, err } if release != nil { defer release() @@ -131,12 +132,12 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if upperRef != nil { m, err := upperRef.Mount(ctx, true, s) if err != nil { - return nil, err + return struct{}{}, err } var release func() error upper, release, err = m.Mount() if err != nil { - return nil, err + return struct{}{}, err } if release != nil { defer release() @@ -151,7 +152,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if 
forceOvlStr := os.Getenv("BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF"); forceOvlStr != "" && sr.kind() != Diff { enableOverlay, err = strconv.ParseBool(forceOvlStr) if err != nil { - return nil, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF") + return struct{}{}, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF") } fallback = false // prohibit fallback on debug } else if !isTypeWindows(sr) { @@ -173,14 +174,14 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if !ok || err != nil { if !fallback { if !ok { - return nil, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper) + return struct{}{}, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper) } if err != nil { - return nil, errors.Wrapf(err, "failed to compute overlay diff") + return struct{}{}, errors.Wrapf(err, "failed to compute overlay diff") } } if logWarnOnErr { - logrus.Warnf("failed to compute blob by overlay differ (ok=%v): %v", ok, err) + bklog.G(ctx).Warnf("failed to compute blob by overlay differ (ok=%v): %v", ok, err) } } if ok { @@ -198,7 +199,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool diff.WithCompressor(compressorFunc), ) if err != nil { - logrus.WithError(err).Warnf("failed to compute blob by buildkit differ") + bklog.G(ctx).WithError(err).Warnf("failed to compute blob by buildkit differ") } } @@ -209,7 +210,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool diff.WithCompressor(compressorFunc), ) if err != nil { - return nil, err + return struct{}{}, err } } @@ -219,7 +220,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool if finalize != nil { a, err := finalize(ctx, sr.cm.ContentStore) if err != nil { - return nil, errors.Wrapf(err, "failed to finalize compression") + return struct{}{}, errors.Wrapf(err, "failed to finalize compression") } for k, v := range 
a { desc.Annotations[k] = v @@ -227,7 +228,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool } info, err := sr.cm.ContentStore.Info(ctx, desc.Digest) if err != nil { - return nil, err + return struct{}{}, err } if diffID, ok := info.Labels[containerdUncompressed]; ok { @@ -235,13 +236,13 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool } else if mediaType == ocispecs.MediaTypeImageLayer { desc.Annotations[containerdUncompressed] = desc.Digest.String() } else { - return nil, errors.Errorf("unknown layer compression type") + return struct{}{}, errors.Errorf("unknown layer compression type") } if err := sr.setBlob(ctx, desc); err != nil { - return nil, err + return struct{}{}, err } - return nil, nil + return struct{}{}, nil }) if err != nil { return err @@ -415,29 +416,29 @@ func isTypeWindows(sr *immutableRef) bool { // ensureCompression ensures the specified ref has the blob of the specified compression Type. func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error { - _, err := g.Do(ctx, fmt.Sprintf("%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) { + _, err := g.Do(ctx, fmt.Sprintf("ensureComp-%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (struct{}, error) { desc, err := ref.ociDesc(ctx, ref.descHandlers, true) if err != nil { - return nil, err + return struct{}{}, err } // Resolve converters layerConvertFunc, err := getConverter(ctx, ref.cm.ContentStore, desc, comp) if err != nil { - return nil, err + return struct{}{}, err } else if layerConvertFunc == nil { if isLazy, err := ref.isLazy(ctx); err != nil { - return nil, err + return struct{}{}, err } else if isLazy { // This ref can be used as the specified compressionType. Keep it lazy. 
- return nil, nil + return struct{}{}, nil } - return nil, ref.linkBlob(ctx, desc) + return struct{}{}, ref.linkBlob(ctx, desc) } // First, lookup local content store if _, err := ref.getBlobWithCompression(ctx, comp.Type); err == nil { - return nil, nil // found the compression variant. no need to convert. + return struct{}{}, nil // found the compression variant. no need to convert. } // Convert layer compression type @@ -447,18 +448,18 @@ func ensureCompression(ctx context.Context, ref *immutableRef, comp compression. dh: ref.descHandlers[desc.Digest], session: s, }).Unlazy(ctx); err != nil { - return nil, err + return struct{}{}, err } newDesc, err := layerConvertFunc(ctx, ref.cm.ContentStore, desc) if err != nil { - return nil, errors.Wrapf(err, "failed to convert") + return struct{}{}, errors.Wrapf(err, "failed to convert") } // Start to track converted layer if err := ref.linkBlob(ctx, *newDesc); err != nil { - return nil, errors.Wrapf(err, "failed to add compression blob") + return struct{}{}, errors.Wrapf(err, "failed to add compression blob") } - return nil, nil + return struct{}{}, nil }) return err } diff --git a/vendor/github.com/moby/buildkit/cache/compression_nydus.go b/vendor/github.com/moby/buildkit/cache/compression_nydus.go index 48b61a4b36..1b64430647 100644 --- a/vendor/github.com/moby/buildkit/cache/compression_nydus.go +++ b/vendor/github.com/moby/buildkit/cache/compression_nydus.go @@ -6,7 +6,6 @@ package cache import ( "compress/gzip" "context" - "encoding/json" "io" "github.com/containerd/containerd/content" @@ -18,13 +17,13 @@ import ( ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" + "github.com/containerd/nydus-snapshotter/pkg/converter" ) func init() { additionalAnnotations = append( additionalAnnotations, - nydusify.LayerAnnotationNydusBlob, nydusify.LayerAnnotationNydusBootstrap, nydusify.LayerAnnotationNydusBlobIDs, + 
converter.LayerAnnotationNydusBlob, converter.LayerAnnotationNydusBootstrap, ) } @@ -58,7 +57,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, // Extracts nydus bootstrap from nydus format for each layer. var cm *cacheManager - layers := []nydusify.Layer{} + layers := []converter.Layer{} blobIDs := []string{} for _, ref := range refs { blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s) @@ -74,7 +73,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, cm = ref.cm } blobIDs = append(blobIDs, blobDesc.Digest.Hex()) - layers = append(layers, nydusify.Layer{ + layers = append(layers, converter.Layer{ Digest: blobDesc.Digest, ReaderAt: ra, }) @@ -84,7 +83,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, pr, pw := io.Pipe() go func() { defer pw.Close() - if _, err := nydusify.Merge(ctx, layers, pw, nydusify.MergeOption{ + if _, err := converter.Merge(ctx, layers, pw, converter.MergeOption{ WithTar: true, }); err != nil { pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) @@ -125,11 +124,6 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, return nil, errors.Wrap(err, "get info from content store") } - blobIDsBytes, err := json.Marshal(blobIDs) - if err != nil { - return nil, errors.Wrap(err, "marshal blob ids") - } - desc := ocispecs.Descriptor{ Digest: compressedDgst, Size: info.Size, @@ -137,9 +131,7 @@ func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, Annotations: map[string]string{ containerdUncompressed: uncompressedDgst.Digest().String(), // Use this annotation to identify nydus bootstrap layer. - nydusify.LayerAnnotationNydusBootstrap: "true", - // Track all blob digests for nydus snapshotter. 
- nydusify.LayerAnnotationNydusBlobIDs: string(blobIDsBytes), + converter.LayerAnnotationNydusBootstrap: "true", }, } diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go index dcf424a6b4..e0f58d57b3 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go @@ -110,7 +110,9 @@ func (cm *cacheManager) GetCacheContext(ctx context.Context, md cache.RefMetadat cm.lruMu.Unlock() if ok { cm.locker.Unlock(md.ID()) + v.(*cacheContext).mu.Lock() // locking is required because multiple ImmutableRefs can reach this code; however none of them use the linkMap. v.(*cacheContext).linkMap = map[string][][]byte{} + v.(*cacheContext).mu.Unlock() return v.(*cacheContext), nil } cc, err := newCacheContext(md) diff --git a/vendor/github.com/moby/buildkit/cache/filelist.go b/vendor/github.com/moby/buildkit/cache/filelist.go index c2c7921fd5..0cb2e9b60a 100644 --- a/vendor/github.com/moby/buildkit/cache/filelist.go +++ b/vendor/github.com/moby/buildkit/cache/filelist.go @@ -20,7 +20,7 @@ const keyFileList = "filelist" // are in the tar stream (AUFS whiteout format). If the reference does not have a // a blob associated with it, the list is empty. 
func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string, error) { - res, err := g.Do(ctx, fmt.Sprintf("filelist-%s", sr.ID()), func(ctx context.Context) (interface{}, error) { + return gFileList.Do(ctx, fmt.Sprintf("filelist-%s", sr.ID()), func(ctx context.Context) ([]string, error) { dt, err := sr.GetExternal(keyFileList) if err == nil && dt != nil { var files []string @@ -80,11 +80,4 @@ func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string } return files, nil }) - if err != nil { - return nil, err - } - if res == nil { - return nil, nil - } - return res.([]string), nil } diff --git a/vendor/github.com/moby/buildkit/cache/manager.go b/vendor/github.com/moby/buildkit/cache/manager.go index d579a6007b..64322055ef 100644 --- a/vendor/github.com/moby/buildkit/cache/manager.go +++ b/vendor/github.com/moby/buildkit/cache/manager.go @@ -27,7 +27,6 @@ import ( imagespecidentity "github.com/opencontainers/image-spec/identity" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) @@ -94,7 +93,7 @@ type cacheManager struct { mountPool sharableMountPool muPrune sync.Mutex // make sure parallel prune is not allowed so there will not be inconsistent results - unlazyG flightcontrol.Group + unlazyG flightcontrol.Group[struct{}] } func NewManager(opt ManagerOpt) (Manager, error) { @@ -243,7 +242,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor, if err := cm.LeaseManager.Delete(context.TODO(), leases.Lease{ ID: l.ID, }); err != nil { - logrus.Errorf("failed to remove lease: %+v", err) + bklog.G(ctx).Errorf("failed to remove lease: %+v", err) } } }() @@ -319,7 +318,7 @@ func (cm *cacheManager) init(ctx context.Context) error { for _, si := range items { if _, err := cm.getRecord(ctx, si.ID()); err != nil { - logrus.Debugf("could not load snapshot %s: %+v", si.ID(), err) + bklog.G(ctx).Debugf("could not 
load snapshot %s: %+v", si.ID(), err) cm.MetadataStore.Clear(si.ID()) cm.LeaseManager.Delete(ctx, leases.Lease{ID: si.ID()}) } @@ -597,7 +596,7 @@ func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, sess session.Gr if err := cm.LeaseManager.Delete(context.TODO(), leases.Lease{ ID: l.ID, }); err != nil { - logrus.Errorf("failed to remove lease: %+v", err) + bklog.G(ctx).Errorf("failed to remove lease: %+v", err) } } }() @@ -1426,12 +1425,13 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) d.Size = 0 return nil } + defer ref.Release(context.TODO()) s, err := ref.size(ctx) if err != nil { return err } d.Size = s - return ref.Release(context.TODO()) + return nil }) }(d) } diff --git a/vendor/github.com/moby/buildkit/cache/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata.go index 82209a93c0..b223024dca 100644 --- a/vendor/github.com/moby/buildkit/cache/metadata.go +++ b/vendor/github.com/moby/buildkit/cache/metadata.go @@ -87,7 +87,7 @@ func (cm *cacheManager) Search(ctx context.Context, idx string) ([]RefMetadata, // callers must hold cm.mu lock func (cm *cacheManager) search(ctx context.Context, idx string) ([]RefMetadata, error) { - sis, err := cm.MetadataStore.Search(idx) + sis, err := cm.MetadataStore.Search(ctx, idx) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go index 170c0a8872..1240034a44 100644 --- a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go +++ b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go @@ -2,12 +2,13 @@ package metadata import ( "bytes" + "context" "encoding/json" "strings" "sync" + "github.com/moby/buildkit/util/bklog" "github.com/pkg/errors" - "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" ) @@ -80,7 +81,7 @@ func (s *Store) Probe(index string) (bool, error) { return exists, errors.WithStack(err) } -func (s *Store) Search(index string) 
([]*StorageItem, error) { +func (s *Store) Search(ctx context.Context, index string) ([]*StorageItem, error) { var out []*StorageItem err := s.db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(indexBucket)) @@ -100,7 +101,7 @@ func (s *Store) Search(index string) ([]*StorageItem, error) { k, _ = c.Next() b := main.Bucket([]byte(itemID)) if b == nil { - logrus.Errorf("index pointing to missing record %s", itemID) + bklog.G(ctx).Errorf("index pointing to missing record %s", itemID) continue } si, err := newStorageItem(itemID, b, s) diff --git a/vendor/github.com/moby/buildkit/cache/refs.go b/vendor/github.com/moby/buildkit/cache/refs.go index 0af736ab70..e448f94b29 100644 --- a/vendor/github.com/moby/buildkit/cache/refs.go +++ b/vendor/github.com/moby/buildkit/cache/refs.go @@ -27,6 +27,7 @@ import ( "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/overlay" "github.com/moby/buildkit/util/progress" rootlessmountopts "github.com/moby/buildkit/util/rootless/mountopts" "github.com/moby/buildkit/util/winlayers" @@ -89,7 +90,7 @@ type cacheRecord struct { mountCache snapshot.Mountable - sizeG flightcontrol.Group + sizeG flightcontrol.Group[int64] // these are filled if multiple refs point to same data equalMutable *mutableRef @@ -107,6 +108,7 @@ func (cr *cacheRecord) ref(triggerLastUsed bool, descHandlers DescHandlers, pg p progress: pg, } cr.refs[ref] = struct{}{} + bklog.G(context.TODO()).WithFields(ref.traceLogFields()).Trace("acquired cache ref") return ref } @@ -118,6 +120,7 @@ func (cr *cacheRecord) mref(triggerLastUsed bool, descHandlers DescHandlers) *mu descHandlers: descHandlers, } cr.refs[ref] = struct{}{} + bklog.G(context.TODO()).WithFields(ref.traceLogFields()).Trace("acquired cache ref") return ref } @@ -322,7 +325,7 @@ func (cr *cacheRecord) viewSnapshotID() string { func (cr *cacheRecord) size(ctx context.Context) (int64, error) { // 
this expects that usage() is implemented lazily - s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) { + return cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (int64, error) { cr.mu.Lock() s := cr.getSize() if s != sizeUnknown { @@ -343,7 +346,7 @@ func (cr *cacheRecord) size(ctx context.Context) (int64, error) { isDead := cr.isDead() cr.mu.Unlock() if isDead { - return int64(0), nil + return 0, nil } if !errors.Is(err, errdefs.ErrNotFound) { return s, errors.Wrapf(err, "failed to get usage for %s", cr.ID()) @@ -376,10 +379,6 @@ func (cr *cacheRecord) size(ctx context.Context) (int64, error) { cr.mu.Unlock() return usage.Size, nil }) - if err != nil { - return 0, err - } - return s.(int64), nil } // caller must hold cr.mu @@ -438,7 +437,19 @@ func (cr *cacheRecord) mount(ctx context.Context, s session.Group) (_ snapshot.M } // call when holding the manager lock -func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error { +func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) (rerr error) { + defer func() { + l := bklog.G(ctx).WithFields(map[string]any{ + "id": cr.ID(), + "refCount": len(cr.refs), + "removeSnapshot": removeSnapshot, + "stack": bklog.LazyStackTrace{}, + }) + if rerr != nil { + l = l.WithError(rerr) + } + l.Trace("removed cache record") + }() delete(cr.cm.records, cr.ID()) if removeSnapshot { if err := cr.cm.LeaseManager.Delete(ctx, leases.Lease{ @@ -469,6 +480,24 @@ type immutableRef struct { progress progress.Controller } +// hold ref lock before calling +func (sr *immutableRef) traceLogFields() logrus.Fields { + m := map[string]any{ + "id": sr.ID(), + "refID": fmt.Sprintf("%p", sr), + "newRefCount": len(sr.refs), + "mutable": false, + "stack": bklog.LazyStackTrace{}, + } + if sr.equalMutable != nil { + m["equalMutableID"] = sr.equalMutable.ID() + } + if sr.equalImmutable != nil { + m["equalImmutableID"] = sr.equalImmutable.ID() + } + return m +} + // Order is from 
parent->child, sr will be at end of slice. Refs should not // be released as they are used internally in the underlying cacheRecords. func (sr *immutableRef) layerChain() []*immutableRef { @@ -591,6 +620,24 @@ type mutableRef struct { descHandlers DescHandlers } +// hold ref lock before calling +func (sr *mutableRef) traceLogFields() logrus.Fields { + m := map[string]any{ + "id": sr.ID(), + "refID": fmt.Sprintf("%p", sr), + "newRefCount": len(sr.refs), + "mutable": true, + "stack": bklog.LazyStackTrace{}, + } + if sr.equalMutable != nil { + m["equalMutableID"] = sr.equalMutable.ID() + } + if sr.equalImmutable != nil { + m["equalImmutableID"] = sr.equalImmutable.ID() + } + return m +} + func (sr *mutableRef) DescHandler(dgst digest.Digest) *DescHandler { return sr.descHandlers[dgst] } @@ -615,11 +662,11 @@ func layerToDistributable(mt string) string { } switch mt { - case ocispecs.MediaTypeImageLayerNonDistributable: + case ocispecs.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. return ocispecs.MediaTypeImageLayer - case ocispecs.MediaTypeImageLayerNonDistributableGzip: + case ocispecs.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. return ocispecs.MediaTypeImageLayerGzip - case ocispecs.MediaTypeImageLayerNonDistributableZstd: + case ocispecs.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. 
return ocispecs.MediaTypeImageLayerZstd case images.MediaTypeDockerSchema2LayerForeign: return images.MediaTypeDockerSchema2Layer @@ -633,11 +680,11 @@ func layerToDistributable(mt string) string { func layerToNonDistributable(mt string) string { switch mt { case ocispecs.MediaTypeImageLayer: - return ocispecs.MediaTypeImageLayerNonDistributable + return ocispecs.MediaTypeImageLayerNonDistributable //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. case ocispecs.MediaTypeImageLayerGzip: - return ocispecs.MediaTypeImageLayerNonDistributableGzip + return ocispecs.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. case ocispecs.MediaTypeImageLayerZstd: - return ocispecs.MediaTypeImageLayerNonDistributableZstd + return ocispecs.MediaTypeImageLayerNonDistributableZstd //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. 
case images.MediaTypeDockerSchema2Layer: return images.MediaTypeDockerSchema2LayerForeign case images.MediaTypeDockerSchema2LayerForeignGzip: @@ -993,7 +1040,7 @@ func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, info.Labels[k] = "" // Remove labels appended in this call } if _, err := r.cm.Snapshotter.Update(ctx, info, flds...); err != nil { - logrus.Warn(errors.Wrapf(err, "failed to remove tmp remote labels")) + bklog.G(ctx).Warn(errors.Wrapf(err, "failed to remove tmp remote labels")) } }() @@ -1006,7 +1053,7 @@ func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, } func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s session.Group) error { - _, err := sr.sizeG.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ interface{}, rerr error) { + _, err := g.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ struct{}, rerr error) { dhs := sr.descHandlers for _, r := range sr.layerChain() { r := r @@ -1018,7 +1065,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s dh := dhs[digest.Digest(r.getBlob())] if dh == nil { // We cannot prepare remote snapshots without descHandler. - return nil, nil + return struct{}{}, nil } // tmpLabels contains dh.SnapshotLabels + session IDs. 
All keys contain @@ -1055,7 +1102,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s info.Labels[k] = "" } if _, err := r.cm.Snapshotter.Update(ctx, info, tmpFields...); err != nil { - logrus.Warn(errors.Wrapf(err, + bklog.G(ctx).Warn(errors.Wrapf(err, "failed to remove tmp remote labels after prepare")) } }() @@ -1070,7 +1117,7 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s break } - return nil, nil + return struct{}{}, nil }) return err } @@ -1093,18 +1140,18 @@ func makeTmpLabelsStargzMode(labels map[string]string, s session.Group) (fields } func (sr *immutableRef) unlazy(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool) error { - _, err := sr.sizeG.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ interface{}, rerr error) { + _, err := g.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ struct{}, rerr error) { if _, err := sr.cm.Snapshotter.Stat(ctx, sr.getSnapshotID()); err == nil { - return nil, nil + return struct{}{}, nil } switch sr.kind() { case Merge, Diff: - return nil, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel) + return struct{}{}, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel) case Layer, BaseLayer: - return nil, sr.unlazyLayer(ctx, dhs, pg, s) + return struct{}{}, sr.unlazyLayer(ctx, dhs, pg, s) } - return nil, nil + return struct{}{}, nil }) return err } @@ -1294,9 +1341,16 @@ func (sr *immutableRef) updateLastUsedNow() bool { return true } -func (sr *immutableRef) release(ctx context.Context) error { - delete(sr.refs, sr) +func (sr *immutableRef) release(ctx context.Context) (rerr error) { + defer func() { + l := bklog.G(ctx).WithFields(sr.traceLogFields()) + if rerr != nil { + l = l.WithError(rerr) + } + l.Trace("released cache ref") + }() + delete(sr.refs, sr) if sr.updateLastUsedNow() { sr.updateLastUsed() if sr.equalMutable != nil { @@ -1363,7 +1417,7 @@ func (cr *cacheRecord) finalize(ctx context.Context) error { 
cr.cm.mu.Lock() defer cr.cm.mu.Unlock() if err := mutable.remove(context.TODO(), true); err != nil { - logrus.Error(err) + bklog.G(ctx).Error(err) } }() @@ -1476,8 +1530,16 @@ func (sr *mutableRef) Release(ctx context.Context) error { return sr.release(ctx) } -func (sr *mutableRef) release(ctx context.Context) error { +func (sr *mutableRef) release(ctx context.Context) (rerr error) { + defer func() { + l := bklog.G(ctx).WithFields(sr.traceLogFields()) + if rerr != nil { + l = l.WithError(rerr) + } + l.Trace("released cache ref") + }() delete(sr.refs, sr) + if !sr.HasCachePolicyRetain() { if sr.equalImmutable != nil { if sr.equalImmutable.HasCachePolicyRetain() { @@ -1514,7 +1576,7 @@ func (m *readOnlyMounter) Mount() ([]mount.Mount, func() error, error) { return nil, nil, err } for i, m := range mounts { - if m.Type == "overlay" { + if overlay.IsOverlayMountType(m) { mounts[i].Options = readonlyOverlay(m.Options) continue } @@ -1624,7 +1686,7 @@ func (sm *sharableMountable) Mount() (_ []mount.Mount, _ func() error, retErr er }() var isOverlay bool for _, m := range mounts { - if m.Type == "overlay" { + if overlay.IsOverlayMountType(m) { isOverlay = true break } diff --git a/vendor/github.com/moby/buildkit/cache/remote.go b/vendor/github.com/moby/buildkit/cache/remote.go index b80bd79cfb..cfafef4cb5 100644 --- a/vendor/github.com/moby/buildkit/cache/remote.go +++ b/vendor/github.com/moby/buildkit/cache/remote.go @@ -305,11 +305,11 @@ func (p lazyRefProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) } func (p lazyRefProvider) Unlazy(ctx context.Context) error { - _, err := p.ref.cm.unlazyG.Do(ctx, string(p.desc.Digest), func(ctx context.Context) (_ interface{}, rerr error) { + _, err := p.ref.cm.unlazyG.Do(ctx, string(p.desc.Digest), func(ctx context.Context) (_ struct{}, rerr error) { if isLazy, err := p.ref.isLazy(ctx); err != nil { - return nil, err + return struct{}{}, err } else if !isLazy { - return nil, nil + return struct{}{}, nil } defer 
func() { if rerr == nil { @@ -320,7 +320,7 @@ func (p lazyRefProvider) Unlazy(ctx context.Context) error { if p.dh == nil { // shouldn't happen, if you have a lazy immutable ref it already should be validated // that descriptor handlers exist for it - return nil, errors.New("unexpected nil descriptor handler") + return struct{}{}, errors.New("unexpected nil descriptor handler") } if p.dh.Progress != nil { @@ -337,7 +337,7 @@ func (p lazyRefProvider) Unlazy(ctx context.Context) error { Manager: p.ref.cm.ContentStore, }, p.desc, p.dh.Ref, logs.LoggerFromContext(ctx)) if err != nil { - return nil, err + return struct{}{}, err } if imageRefs := p.ref.getImageRefs(); len(imageRefs) > 0 { @@ -345,12 +345,12 @@ func (p lazyRefProvider) Unlazy(ctx context.Context) error { imageRef := imageRefs[0] if p.ref.GetDescription() == "" { if err := p.ref.SetDescription("pulled from " + imageRef); err != nil { - return nil, err + return struct{}{}, err } } } - return nil, nil + return struct{}{}, nil }) return err } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/export.go b/vendor/github.com/moby/buildkit/cache/remotecache/export.go index a0fd7ba7e2..fbb475132d 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/export.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/export.go @@ -16,7 +16,7 @@ import ( "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/progress/logs" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" + "github.com/opencontainers/image-spec/specs-go" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -37,24 +37,135 @@ type Config struct { Compression compression.Config } +type CacheType int + const ( // ExportResponseManifestDesc is a key for the map returned from Exporter.Finalize. // The map value is a JSON string of an OCI desciptor of a manifest. 
ExporterResponseManifestDesc = "cache.manifest" ) -type contentCacheExporter struct { - solver.CacheExporterTarget - chains *v1.CacheChains - ingester content.Ingester - oci bool - ref string - comp compression.Config +const ( + NotSet CacheType = iota + ManifestList + ImageManifest +) + +func (data CacheType) String() string { + switch data { + case ManifestList: + return "Manifest List" + case ImageManifest: + return "Image Manifest" + default: + return "Not Set" + } } -func NewExporter(ingester content.Ingester, ref string, oci bool, compressionConfig compression.Config) Exporter { +func NewExporter(ingester content.Ingester, ref string, oci bool, imageManifest bool, compressionConfig compression.Config) Exporter { cc := v1.NewCacheChains() - return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, ref: ref, comp: compressionConfig} + return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, imageManifest: imageManifest, ref: ref, comp: compressionConfig} +} + +type ExportableCache struct { + // This cache describes two distinct styles of exportable cache, one is an Index (or Manifest List) of blobs, + // or as an artifact using the OCI image manifest format. 
+ ExportedManifest ocispecs.Manifest + ExportedIndex ocispecs.Index + CacheType CacheType + OCI bool +} + +func NewExportableCache(oci bool, imageManifest bool) (*ExportableCache, error) { + var mediaType string + + if imageManifest { + mediaType = ocispecs.MediaTypeImageManifest + if !oci { + return nil, errors.Errorf("invalid configuration for remote cache") + } + } else { + if oci { + mediaType = ocispecs.MediaTypeImageIndex + } else { + mediaType = images.MediaTypeDockerSchema2ManifestList + } + } + + cacheType := ManifestList + if imageManifest { + cacheType = ImageManifest + } + + schemaVersion := specs.Versioned{SchemaVersion: 2} + switch cacheType { + case ManifestList: + return &ExportableCache{ExportedIndex: ocispecs.Index{ + MediaType: mediaType, + Versioned: schemaVersion, + }, + CacheType: cacheType, + OCI: oci, + }, nil + case ImageManifest: + return &ExportableCache{ExportedManifest: ocispecs.Manifest{ + MediaType: mediaType, + Versioned: schemaVersion, + }, + CacheType: cacheType, + OCI: oci, + }, nil + default: + return nil, errors.Errorf("exportable cache type not set") + } +} + +func (ec *ExportableCache) MediaType() string { + if ec.CacheType == ManifestList { + return ec.ExportedIndex.MediaType + } + return ec.ExportedManifest.MediaType +} + +func (ec *ExportableCache) AddCacheBlob(blob ocispecs.Descriptor) { + if ec.CacheType == ManifestList { + ec.ExportedIndex.Manifests = append(ec.ExportedIndex.Manifests, blob) + } else { + ec.ExportedManifest.Layers = append(ec.ExportedManifest.Layers, blob) + } +} + +func (ec *ExportableCache) FinalizeCache(ctx context.Context) { + if ec.CacheType == ManifestList { + ec.ExportedIndex.Manifests = compression.ConvertAllLayerMediaTypes(ctx, ec.OCI, ec.ExportedIndex.Manifests...) + } else { + ec.ExportedManifest.Layers = compression.ConvertAllLayerMediaTypes(ctx, ec.OCI, ec.ExportedManifest.Layers...) 
+ } +} + +func (ec *ExportableCache) SetConfig(config ocispecs.Descriptor) { + if ec.CacheType == ManifestList { + ec.ExportedIndex.Manifests = append(ec.ExportedIndex.Manifests, config) + } else { + ec.ExportedManifest.Config = config + } +} + +func (ec *ExportableCache) MarshalJSON() ([]byte, error) { + if ec.CacheType == ManifestList { + return json.Marshal(ec.ExportedIndex) + } + return json.Marshal(ec.ExportedManifest) +} + +type contentCacheExporter struct { + solver.CacheExporterTarget + chains *v1.CacheChains + ingester content.Ingester + oci bool + imageManifest bool + ref string + comp compression.Config } func (ce *contentCacheExporter) Name() string { @@ -74,21 +185,9 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string return nil, err } - // own type because oci type can't be pushed and docker type doesn't have annotations - type manifestList struct { - specs.Versioned - - MediaType string `json:"mediaType,omitempty"` - - // Manifests references platform specific manifests. - Manifests []ocispecs.Descriptor `json:"manifests"` - } - - var mfst manifestList - mfst.SchemaVersion = 2 - mfst.MediaType = images.MediaTypeDockerSchema2ManifestList - if ce.oci { - mfst.MediaType = ocispecs.MediaTypeImageIndex + cache, err := NewExportableCache(ce.oci, ce.imageManifest) + if err != nil { + return nil, err } for _, l := range config.Layers { @@ -101,10 +200,10 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string return nil, layerDone(errors.Wrap(err, "error writing layer blob")) } layerDone(nil) - mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor) + cache.AddCacheBlob(dgstPair.Descriptor) } - mfst.Manifests = compression.ConvertAllLayerMediaTypes(ce.oci, mfst.Manifests...) 
+ cache.FinalizeCache(ctx) dt, err := json.Marshal(config) if err != nil { @@ -122,9 +221,9 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string } configDone(nil) - mfst.Manifests = append(mfst.Manifests, desc) + cache.SetConfig(desc) - dt, err = json.Marshal(mfst) + dt, err = cache.MarshalJSON() if err != nil { return nil, errors.Wrap(err, "failed to marshal manifest") } @@ -133,9 +232,14 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string desc = ocispecs.Descriptor{ Digest: dgst, Size: int64(len(dt)), - MediaType: mfst.MediaType, + MediaType: cache.MediaType(), } - mfstDone := progress.OneOff(ctx, fmt.Sprintf("writing manifest %s", dgst)) + + mfstLog := fmt.Sprintf("writing cache manifest %s", dgst) + if ce.imageManifest { + mfstLog = fmt.Sprintf("writing cache image manifest %s", dgst) + } + mfstDone := progress.OneOff(ctx, mfstLog) if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { return nil, mfstDone(errors.Wrap(err, "error writing manifest blob")) } @@ -145,5 +249,6 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string } res[ExporterResponseManifestDesc] = string(descJSON) mfstDone(nil) + return res, nil } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go b/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go index f36693d3b0..c24755e93d 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go @@ -15,6 +15,7 @@ import ( v1 "github.com/moby/buildkit/cache/remotecache/v1" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/tracing" @@ -22,13 +23,12 @@ import ( digest "github.com/opencontainers/go-digest" ocispecs 
"github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" actionscache "github.com/tonistiigi/go-actions-cache" "golang.org/x/sync/errgroup" ) func init() { - actionscache.Log = logrus.Debugf + actionscache.Log = bklog.L.Debugf } const ( @@ -92,7 +92,7 @@ func NewExporter(c *Config) (remotecache.Exporter, error) { } func (*exporter) Name() string { - return "exporting to GitHub cache" + return "exporting to GitHub Actions Cache" } func (ce *exporter) Config() remotecache.Config { diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/import.go b/vendor/github.com/moby/buildkit/cache/remotecache/import.go index 6278090187..347d935e4a 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/import.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/import.go @@ -3,6 +3,7 @@ package remotecache import ( "context" "encoding/json" + "fmt" "io" "sync" "time" @@ -12,12 +13,13 @@ import ( v1 "github.com/moby/buildkit/cache/remotecache/v1" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) @@ -47,24 +49,52 @@ func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispecs.Descr return nil, err } - var mfst ocispecs.Index - if err := json.Unmarshal(dt, &mfst); err != nil { + manifestType, err := imageutil.DetectManifestBlobMediaType(dt) + if err != nil { return nil, err } - allLayers := v1.DescriptorProvider{} + layerDone := progress.OneOff(ctx, fmt.Sprintf("inferred cache manifest type: %s", manifestType)) + layerDone(nil) + allLayers := v1.DescriptorProvider{} var configDesc ocispecs.Descriptor - for _, m := range mfst.Manifests { - 
if m.MediaType == v1.CacheConfigMediaTypeV0 { - configDesc = m - continue + switch manifestType { + case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: + var mfst ocispecs.Index + if err := json.Unmarshal(dt, &mfst); err != nil { + return nil, err } - allLayers[m.Digest] = v1.DescriptorProviderPair{ - Descriptor: m, - Provider: ci.provider, + + for _, m := range mfst.Manifests { + if m.MediaType == v1.CacheConfigMediaTypeV0 { + configDesc = m + continue + } + allLayers[m.Digest] = v1.DescriptorProviderPair{ + Descriptor: m, + Provider: ci.provider, + } } + case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest: + var mfst ocispecs.Manifest + if err := json.Unmarshal(dt, &mfst); err != nil { + return nil, err + } + + if mfst.Config.MediaType == v1.CacheConfigMediaTypeV0 { + configDesc = mfst.Config + } + for _, m := range mfst.Layers { + allLayers[m.Digest] = v1.DescriptorProviderPair{ + Descriptor: m, + Provider: ci.provider, + } + } + default: + err = errors.Wrapf(err, "unsupported or uninferrable manifest type") + return nil, err } if dsls, ok := ci.provider.(DistributionSourceLabelSetter); ok { @@ -162,7 +192,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte } if len(img.Rootfs.DiffIDs) != len(m.Layers) { - logrus.Warnf("invalid image with mismatching manifest and config") + bklog.G(ctx).Warnf("invalid image with mismatching manifest and config") return nil } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go index 036ec059f7..3b7b0c68d2 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go @@ -8,10 +8,10 @@ import ( v1 "github.com/moby/buildkit/cache/remotecache/v1" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/bklog" 
"github.com/moby/buildkit/util/compression" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc { @@ -85,7 +85,7 @@ func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest) } if len(cfg.Layers) == 0 { - logrus.Warn("failed to match any cache with layers") + bklog.G(ctx).Warn("failed to match any cache with layers") return nil, nil } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go index 7f3d83b70f..818f9b441e 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go @@ -19,13 +19,19 @@ const ( attrDigest = "digest" attrSrc = "src" attrDest = "dest" + attrImageManifest = "image-manifest" attrOCIMediatypes = "oci-mediatypes" contentStoreIDPrefix = "local:" - attrLayerCompression = "compression" - attrForceCompression = "force-compression" - attrCompressionLevel = "compression-level" ) +type exporter struct { + remotecache.Exporter +} + +func (*exporter) Name() string { + return "exporting cache to client directory" +} + // ResolveCacheExporterFunc for "local" cache exporter. 
func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExporterFunc { return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) { @@ -33,7 +39,7 @@ func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExpor if store == "" { return nil, errors.New("local cache exporter requires dest") } - compressionConfig, err := attrsToCompression(attrs) + compressionConfig, err := compression.ParseAttributes(attrs) if err != nil { return nil, err } @@ -45,12 +51,20 @@ func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExpor } ociMediatypes = b } + imageManifest := false + if v, ok := attrs[attrImageManifest]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", attrImageManifest) + } + imageManifest = b + } csID := contentStoreIDPrefix + store cs, err := getContentStore(ctx, sm, g, csID) if err != nil { return nil, err } - return remotecache.NewExporter(cs, "", ociMediatypes, *compressionConfig), nil + return &exporter{remotecache.NewExporter(cs, "", ociMediatypes, imageManifest, compressionConfig)}, nil } } @@ -109,38 +123,3 @@ type unlazyProvider struct { func (p *unlazyProvider) UnlazySession(desc ocispecs.Descriptor) session.Group { return p.s } - -func attrsToCompression(attrs map[string]string) (*compression.Config, error) { - var compressionType compression.Type - if v, ok := attrs[attrLayerCompression]; ok { - c, err := compression.Parse(v) - if err != nil { - return nil, err - } - compressionType = c - } else { - compressionType = compression.Default - } - compressionConfig := compression.New(compressionType) - if v, ok := attrs[attrForceCompression]; ok { - var force bool - if v == "" { - force = true - } else { - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, attrForceCompression) - } - force = b - } - compressionConfig = 
compressionConfig.SetForce(force) - } - if v, ok := attrs[attrCompressionLevel]; ok { - ii, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, attrCompressionLevel) - } - compressionConfig = compressionConfig.SetLevel(int(ii)) - } - return &compressionConfig, nil -} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go index e3b32eb296..007da98855 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go @@ -15,34 +15,43 @@ import ( "github.com/moby/buildkit/util/estargz" "github.com/moby/buildkit/util/push" "github.com/moby/buildkit/util/resolver" + resolverconfig "github.com/moby/buildkit/util/resolver/config" "github.com/moby/buildkit/util/resolver/limited" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -func canonicalizeRef(rawRef string) (string, error) { +func canonicalizeRef(rawRef string) (reference.Named, error) { if rawRef == "" { - return "", errors.New("missing ref") + return nil, errors.New("missing ref") } parsed, err := reference.ParseNormalizedNamed(rawRef) if err != nil { - return "", err + return nil, err } - return reference.TagNameOnly(parsed).String(), nil + parsed = reference.TagNameOnly(parsed) + return parsed, nil } const ( - attrRef = "ref" - attrOCIMediatypes = "oci-mediatypes" - attrLayerCompression = "compression" - attrForceCompression = "force-compression" - attrCompressionLevel = "compression-level" + attrRef = "ref" + attrImageManifest = "image-manifest" + attrOCIMediatypes = "oci-mediatypes" + attrInsecure = "registry.insecure" ) +type exporter struct { + remotecache.Exporter +} + +func (*exporter) Name() string { + return "exporting cache to registry" +} + func 
ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) remotecache.ResolveCacheExporterFunc { return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) { - compressionConfig, err := attrsToCompression(attrs) + compressionConfig, err := compression.ParseAttributes(attrs) if err != nil { return nil, err } @@ -50,6 +59,7 @@ func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) r if err != nil { return nil, err } + refString := ref.String() ociMediatypes := true if v, ok := attrs[attrOCIMediatypes]; ok { b, err := strconv.ParseBool(v) @@ -58,12 +68,30 @@ func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) r } ociMediatypes = b } - remote := resolver.DefaultPool.GetResolver(hosts, ref, "push", sm, g) - pusher, err := push.Pusher(ctx, remote, ref) + imageManifest := false + if v, ok := attrs[attrImageManifest]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", attrImageManifest) + } + imageManifest = b + } + insecure := false + if v, ok := attrs[attrInsecure]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", attrInsecure) + } + insecure = b + } + + scope, hosts := registryConfig(hosts, ref, "push", insecure) + remote := resolver.DefaultPool.GetResolver(hosts, refString, scope, sm, g) + pusher, err := push.Pusher(ctx, remote, refString) if err != nil { return nil, err } - return remotecache.NewExporter(contentutil.FromPusher(pusher), ref, ociMediatypes, *compressionConfig), nil + return &exporter{remotecache.NewExporter(contentutil.FromPusher(pusher), refString, ociMediatypes, imageManifest, compressionConfig)}, nil } } @@ -73,8 +101,19 @@ func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docke if err != nil { return nil, ocispecs.Descriptor{}, err } - remote := resolver.DefaultPool.GetResolver(hosts, ref, 
"pull", sm, g) - xref, desc, err := remote.Resolve(ctx, ref) + refString := ref.String() + insecure := false + if v, ok := attrs[attrInsecure]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, ocispecs.Descriptor{}, errors.Wrapf(err, "failed to parse %s", attrInsecure) + } + insecure = b + } + + scope, hosts := registryConfig(hosts, ref, "pull", insecure) + remote := resolver.DefaultPool.GetResolver(hosts, refString, scope, sm, g) + xref, desc, err := remote.Resolve(ctx, refString) if err != nil { return nil, ocispecs.Descriptor{}, err } @@ -83,8 +122,8 @@ func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docke return nil, ocispecs.Descriptor{}, err } src := &withDistributionSourceLabel{ - Provider: contentutil.FromFetcher(limited.Default.WrapFetcher(fetcher, ref)), - ref: ref, + Provider: contentutil.FromFetcher(limited.Default.WrapFetcher(fetcher, refString)), + ref: refString, source: cs, } return remotecache.NewImporter(src), desc, nil @@ -130,37 +169,17 @@ func (dsl *withDistributionSourceLabel) SnapshotLabels(descs []ocispecs.Descript return labels } -func attrsToCompression(attrs map[string]string) (*compression.Config, error) { - var compressionType compression.Type - if v, ok := attrs[attrLayerCompression]; ok { - c, err := compression.Parse(v) - if err != nil { - return nil, err - } - compressionType = c - } else { - compressionType = compression.Default +func registryConfig(hosts docker.RegistryHosts, ref reference.Named, scope string, insecure bool) (string, docker.RegistryHosts) { + if insecure { + insecureTrue := true + httpTrue := true + hosts = resolver.NewRegistryConfig(map[string]resolverconfig.RegistryConfig{ + reference.Domain(ref): { + Insecure: &insecureTrue, + PlainHTTP: &httpTrue, + }, + }) + scope += ":insecure" } - compressionConfig := compression.New(compressionType) - if v, ok := attrs[attrForceCompression]; ok { - var force bool - if v == "" { - force = true - } else { - b, err := 
strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, attrForceCompression) - } - force = b - } - compressionConfig = compressionConfig.SetForce(force) - } - if v, ok := attrs[attrCompressionLevel]; ok { - ii, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, attrCompressionLevel) - } - compressionConfig = compressionConfig.SetLevel(int(ii)) - } - return &compressionConfig, nil + return scope, hosts } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go index a4f7f6ad05..004fac0521 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go @@ -291,7 +291,7 @@ func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheR return nil, errors.WithStack(solver.ErrNotFound) } -func (cs *cacheResultStorage) Exists(id string) bool { +func (cs *cacheResultStorage) Exists(ctx context.Context, id string) bool { return cs.byResultID(id) != nil } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go index 8c8bbde5dc..11ea24b865 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go @@ -39,7 +39,7 @@ func (c *CacheChains) Visited(v interface{}) bool { return ok } -func (c *CacheChains) normalize() error { +func (c *CacheChains) normalize(ctx context.Context) error { st := &normalizeState{ added: map[*item]*item{}, links: map[*item]map[nlink]map[digest.Digest]struct{}{}, @@ -66,7 +66,7 @@ func (c *CacheChains) normalize() error { } } - st.removeLoops() + st.removeLoops(ctx) items := make([]*item, 0, len(st.byKey)) for _, it := range st.byKey { @@ -77,7 +77,7 
@@ func (c *CacheChains) normalize() error { } func (c *CacheChains) Marshal(ctx context.Context) (*CacheConfig, DescriptorProvider, error) { - if err := c.normalize(); err != nil { + if err := c.normalize(ctx); err != nil { return nil, nil, err } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go index f7139035fa..213e670a61 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go @@ -6,10 +6,10 @@ import ( "sort" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/bklog" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // sortConfig sorts the config structure to make sure it is deterministic @@ -128,7 +128,7 @@ type normalizeState struct { next int } -func (s *normalizeState) removeLoops() { +func (s *normalizeState) removeLoops(ctx context.Context) { roots := []digest.Digest{} for dgst, it := range s.byKey { if len(it.links) == 0 { @@ -139,11 +139,11 @@ func (s *normalizeState) removeLoops() { visited := map[digest.Digest]struct{}{} for _, d := range roots { - s.checkLoops(d, visited) + s.checkLoops(ctx, d, visited) } } -func (s *normalizeState) checkLoops(d digest.Digest, visited map[digest.Digest]struct{}) { +func (s *normalizeState) checkLoops(ctx context.Context, d digest.Digest, visited map[digest.Digest]struct{}) { it, ok := s.byKey[d] if !ok { return @@ -165,11 +165,11 @@ func (s *normalizeState) checkLoops(d digest.Digest, visited map[digest.Digest]s continue } if !it2.removeLink(it) { - logrus.Warnf("failed to remove looping cache key %s %s", d, id) + bklog.G(ctx).Warnf("failed to remove looping cache key %s %s", d, id) } delete(links[l], id) } else { - s.checkLoops(id, visited) + s.checkLoops(ctx, id, visited) } } } diff --git 
a/vendor/github.com/moby/buildkit/cache/util/fsutil.go b/vendor/github.com/moby/buildkit/cache/util/fsutil.go index e90ed45f77..945e017168 100644 --- a/vendor/github.com/moby/buildkit/cache/util/fsutil.go +++ b/vendor/github.com/moby/buildkit/cache/util/fsutil.go @@ -57,21 +57,25 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([ return errors.WithStack(err) } - if req.Range == nil { - dt, err = os.ReadFile(fp) - if err != nil { - return errors.WithStack(err) - } - } else { - f, err := os.Open(fp) - if err != nil { - return errors.WithStack(err) - } - dt, err = io.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length))) - f.Close() - if err != nil { - return errors.WithStack(err) + f, err := os.Open(fp) + if err != nil { + // The filename here is internal to the mount, so we can restore + // the request base path for error reporting. + // See os.DirFS.Open for details. + if pe, ok := err.(*os.PathError); ok { + pe.Path = req.Filename } + return errors.WithStack(err) + } + defer f.Close() + + var rdr io.Reader = f + if req.Range != nil { + rdr = io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)) + } + dt, err = io.ReadAll(rdr) + if err != nil { + return errors.WithStack(err) } return nil }) diff --git a/vendor/github.com/moby/buildkit/client/client.go b/vendor/github.com/moby/buildkit/client/client.go index deac2507a9..1d60a70683 100644 --- a/vendor/github.com/moby/buildkit/client/client.go +++ b/vendor/github.com/moby/buildkit/client/client.go @@ -11,7 +11,6 @@ import ( contentapi "github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/containerd/defaults" - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/connhelper" "github.com/moby/buildkit/session" @@ -26,6 +25,7 @@ import ( sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/trace" 
"google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" ) @@ -35,7 +35,9 @@ type Client struct { sessionDialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) } -type ClientOpt interface{} +type ClientOpt interface { + isClientOpt() +} // New returns a new buildkit client. Address can be empty for the system-default address. func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) { @@ -44,8 +46,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)), } needDialer := true - needWithInsecure := true - tlsServerName := "" var unary []grpc.UnaryClientInterceptor var stream []grpc.StreamClientInterceptor @@ -54,19 +54,18 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error var tracerProvider trace.TracerProvider var tracerDelegate TracerDelegate var sessionDialer func(context.Context, string, map[string][]string) (net.Conn, error) + var customDialOptions []grpc.DialOption + var creds *withCredentials for _, o := range opts { if _, ok := o.(*withFailFast); ok { gopts = append(gopts, grpc.FailOnNonTempDialError(true)) } if credInfo, ok := o.(*withCredentials); ok { - opt, err := loadCredentials(credInfo) - if err != nil { - return nil, err + if creds == nil { + creds = &withCredentials{} } - gopts = append(gopts, opt) - needWithInsecure = false - tlsServerName = credInfo.ServerName + creds = creds.merge(credInfo) } if wt, ok := o.(*withTracer); ok { customTracer = true @@ -82,6 +81,19 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error if sd, ok := o.(*withSessionDialer); ok { sessionDialer = sd.dialer } + if opt, ok := o.(*withGRPCDialOption); ok { + customDialOptions = append(customDialOptions, opt.opt) + } + } + + if creds == nil { + gopts = 
append(gopts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } else { + credOpts, err := loadCredentials(creds) + if err != nil { + return nil, err + } + gopts = append(gopts, credOpts) } if !customTracer { @@ -103,9 +115,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error } gopts = append(gopts, grpc.WithContextDialer(dialFn)) } - if needWithInsecure { - gopts = append(gopts, grpc.WithTransportCredentials(insecure.NewCredentials())) - } if address == "" { address = appdefaults.Address } @@ -117,7 +126,10 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error // ref: https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.3 // - However, when TLS specified, grpc-go requires it must match // with its servername specified for certificate validation. - authority := tlsServerName + var authority string + if creds != nil && creds.serverName != "" { + authority = creds.serverName + } if authority == "" { // authority as hostname from target address uri, err := url.Parse(address) @@ -131,17 +143,9 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error unary = append(unary, grpcerrors.UnaryClientInterceptor) stream = append(stream, grpcerrors.StreamClientInterceptor) - if len(unary) == 1 { - gopts = append(gopts, grpc.WithUnaryInterceptor(unary[0])) - } else if len(unary) > 1 { - gopts = append(gopts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...))) - } - - if len(stream) == 1 { - gopts = append(gopts, grpc.WithStreamInterceptor(stream[0])) - } else if len(stream) > 1 { - gopts = append(gopts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...))) - } + gopts = append(gopts, grpc.WithChainUnaryInterceptor(unary...)) + gopts = append(gopts, grpc.WithChainStreamInterceptor(stream...)) + gopts = append(gopts, customDialOptions...) conn, err := grpc.DialContext(ctx, address, gopts...) 
if err != nil { @@ -181,12 +185,27 @@ func (c *Client) Dialer() session.Dialer { return grpchijack.Dialer(c.ControlClient()) } +func (c *Client) Wait(ctx context.Context) error { + opts := []grpc.CallOption{grpc.WaitForReady(true)} + _, err := c.ControlClient().Info(ctx, &controlapi.InfoRequest{}, opts...) + if err != nil { + if code := grpcerrors.Code(err); code == codes.Unimplemented { + // only buildkit v0.11+ supports the info api, but an unimplemented + // response error is still a response so we can ignore it + return nil + } + } + return err +} + func (c *Client) Close() error { return c.conn.Close() } type withFailFast struct{} +func (*withFailFast) isClientOpt() {} + func WithFailFast() ClientOpt { return &withFailFast{} } @@ -195,50 +214,115 @@ type withDialer struct { dialer func(context.Context, string) (net.Conn, error) } +func (*withDialer) isClientOpt() {} + func WithContextDialer(df func(context.Context, string) (net.Conn, error)) ClientOpt { return &withDialer{dialer: df} } type withCredentials struct { - ServerName string - CACert string - Cert string - Key string + // server options + serverName string + caCert string + caCertSystem bool + + // client options + cert string + key string } +func (opts *withCredentials) merge(opts2 *withCredentials) *withCredentials { + result := *opts + if opts2 == nil { + return &result + } + + // server options + if opts2.serverName != "" { + result.serverName = opts2.serverName + } + if opts2.caCert != "" { + result.caCert = opts2.caCert + } + if opts2.caCertSystem { + result.caCertSystem = opts2.caCertSystem + } + + // client options + if opts2.cert != "" { + result.cert = opts2.cert + } + if opts2.key != "" { + result.key = opts2.key + } + + return &result +} + +func (*withCredentials) isClientOpt() {} + // WithCredentials configures the TLS parameters of the client. 
// Arguments: -// * serverName: specifies the name of the target server -// * ca: specifies the filepath of the CA certificate to use for verification -// * cert: specifies the filepath of the client certificate -// * key: specifies the filepath of the client key -func WithCredentials(serverName, ca, cert, key string) ClientOpt { - return &withCredentials{serverName, ca, cert, key} +// * cert: specifies the filepath of the client certificate +// * key: specifies the filepath of the client key +func WithCredentials(cert, key string) ClientOpt { + return &withCredentials{ + cert: cert, + key: key, + } +} + +// WithServerConfig configures the TLS parameters to connect to the server. +// Arguments: +// * serverName: specifies the server name to verify the hostname +// * caCert: specifies the filepath of the CA certificate +func WithServerConfig(serverName, caCert string) ClientOpt { + return &withCredentials{ + serverName: serverName, + caCert: caCert, + } +} + +// WithServerConfigSystem configures the TLS parameters to connect to the +// server, using the system's certificate pool. 
+func WithServerConfigSystem(serverName string) ClientOpt { + return &withCredentials{ + serverName: serverName, + caCertSystem: true, + } } func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { - ca, err := os.ReadFile(opts.CACert) - if err != nil { - return nil, errors.Wrap(err, "could not read ca certificate") + cfg := &tls.Config{} + + if opts.caCertSystem { + cfg.RootCAs, _ = x509.SystemCertPool() + } + if cfg.RootCAs == nil { + cfg.RootCAs = x509.NewCertPool() } - certPool := x509.NewCertPool() - if ok := certPool.AppendCertsFromPEM(ca); !ok { - return nil, errors.New("failed to append ca certs") + if opts.caCert != "" { + ca, err := os.ReadFile(opts.caCert) + if err != nil { + return nil, errors.Wrap(err, "could not read ca certificate") + } + if ok := cfg.RootCAs.AppendCertsFromPEM(ca); !ok { + return nil, errors.New("failed to append ca certs") + } } - cfg := &tls.Config{ - ServerName: opts.ServerName, - RootCAs: certPool, + if opts.serverName != "" { + cfg.ServerName = opts.serverName } // we will produce an error if the user forgot about either cert or key if at least one is specified - if opts.Cert != "" || opts.Key != "" { - cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key) + if opts.cert != "" || opts.key != "" { + cert, err := tls.LoadX509KeyPair(opts.cert, opts.key) if err != nil { return nil, errors.Wrap(err, "could not read certificate/key") } - cfg.Certificates = []tls.Certificate{cert} + cfg.Certificates = append(cfg.Certificates, cert) } return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil @@ -252,6 +336,8 @@ type withTracer struct { tp trace.TracerProvider } +func (w *withTracer) isClientOpt() {} + type TracerDelegate interface { SetSpanExporter(context.Context, sdktrace.SpanExporter) error } @@ -266,6 +352,8 @@ type withTracerDelegate struct { TracerDelegate } +func (w *withTracerDelegate) isClientOpt() {} + func WithSessionDialer(dialer func(context.Context, string, map[string][]string) (net.Conn, error)) 
ClientOpt { return &withSessionDialer{dialer} } @@ -274,6 +362,8 @@ type withSessionDialer struct { dialer func(context.Context, string, map[string][]string) (net.Conn, error) } +func (w *withSessionDialer) isClientOpt() {} + func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) { ch, err := connhelper.GetConnectionHelper(address) if err != nil { @@ -294,3 +384,13 @@ func filterInterceptor(intercept grpc.UnaryClientInterceptor) grpc.UnaryClientIn return intercept(ctx, method, req, reply, cc, invoker, opts...) } } + +type withGRPCDialOption struct { + opt grpc.DialOption +} + +func (*withGRPCDialOption) isClientOpt() {} + +func WithGRPCDialOption(opt grpc.DialOption) ClientOpt { + return &withGRPCDialOption{opt} +} diff --git a/vendor/github.com/moby/buildkit/client/llb/async.go b/vendor/github.com/moby/buildkit/client/llb/async.go index 73d2a92fa1..8771c71978 100644 --- a/vendor/github.com/moby/buildkit/client/llb/async.go +++ b/vendor/github.com/moby/buildkit/client/llb/async.go @@ -15,7 +15,7 @@ type asyncState struct { target State set bool err error - g flightcontrol.Group + g flightcontrol.Group[State] } func (as *asyncState) Output() Output { @@ -53,7 +53,7 @@ func (as *asyncState) ToInput(ctx context.Context, c *Constraints) (*pb.Input, e } func (as *asyncState) Do(ctx context.Context, c *Constraints) error { - _, err := as.g.Do(ctx, "", func(ctx context.Context) (interface{}, error) { + _, err := as.g.Do(ctx, "", func(ctx context.Context) (State, error) { if as.set { return as.target, as.err } diff --git a/vendor/github.com/moby/buildkit/client/llb/definition.go b/vendor/github.com/moby/buildkit/client/llb/definition.go index f92ee2d0ab..627accfebc 100644 --- a/vendor/github.com/moby/buildkit/client/llb/definition.go +++ b/vendor/github.com/moby/buildkit/client/llb/definition.go @@ -24,7 +24,7 @@ type DefinitionOp struct { platforms map[digest.Digest]*ocispecs.Platform dgst digest.Digest index pb.OutputIndex - 
inputCache map[digest.Digest][]*DefinitionOp + inputCache *sync.Map // shared and written among DefinitionOps so avoid race on this map using sync.Map } // NewDefinitionOp returns a new operation from a marshalled definition. @@ -70,7 +70,7 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) { state := NewState(op) st = &state } - sourceMaps[i] = NewSourceMap(st, info.Filename, info.Data) + sourceMaps[i] = NewSourceMap(st, info.Filename, info.Language, info.Data) } for dgst, locs := range def.Source.Locations { @@ -101,7 +101,7 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) { platforms: platforms, dgst: dgst, index: index, - inputCache: make(map[digest.Digest][]*DefinitionOp), + inputCache: new(sync.Map), }, nil } @@ -180,6 +180,18 @@ func (d *DefinitionOp) Output() Output { }} } +func (d *DefinitionOp) loadInputCache(dgst digest.Digest) ([]*DefinitionOp, bool) { + a, ok := d.inputCache.Load(dgst.String()) + if ok { + return a.([]*DefinitionOp), true + } + return nil, false +} + +func (d *DefinitionOp) storeInputCache(dgst digest.Digest, c []*DefinitionOp) { + d.inputCache.Store(dgst.String(), c) +} + func (d *DefinitionOp) Inputs() []Output { if d.dgst == "" { return nil @@ -195,7 +207,7 @@ func (d *DefinitionOp) Inputs() []Output { for _, input := range op.Inputs { var vtx *DefinitionOp d.mu.Lock() - if existingIndexes, ok := d.inputCache[input.Digest]; ok { + if existingIndexes, ok := d.loadInputCache(input.Digest); ok { if int(input.Index) < len(existingIndexes) && existingIndexes[input.Index] != nil { vtx = existingIndexes[input.Index] } @@ -211,14 +223,14 @@ func (d *DefinitionOp) Inputs() []Output { inputCache: d.inputCache, sources: d.sources, } - existingIndexes := d.inputCache[input.Digest] + existingIndexes, _ := d.loadInputCache(input.Digest) indexDiff := int(input.Index) - len(existingIndexes) if indexDiff >= 0 { // make room in the slice for the new index being set existingIndexes = append(existingIndexes, 
make([]*DefinitionOp, indexDiff+1)...) } existingIndexes[input.Index] = vtx - d.inputCache[input.Digest] = existingIndexes + d.storeInputCache(input.Digest, existingIndexes) } d.mu.Unlock() diff --git a/vendor/github.com/moby/buildkit/client/llb/diff.go b/vendor/github.com/moby/buildkit/client/llb/diff.go index b42fcbbcf4..1de2b6f04d 100644 --- a/vendor/github.com/moby/buildkit/client/llb/diff.go +++ b/vendor/github.com/moby/buildkit/client/llb/diff.go @@ -90,6 +90,8 @@ func (m *DiffOp) Inputs() (out []Output) { return out } +// Diff returns a state that represents the diff of the lower and upper states. +// The returned State is useful for use with [Merge] where you can merge the lower state with the diff. func Diff(lower, upper State, opts ...ConstraintsOpt) State { if lower.Output() == nil { if upper.Output() == nil { @@ -104,5 +106,5 @@ func Diff(lower, upper State, opts ...ConstraintsOpt) State { for _, o := range opts { o.SetConstraintsOption(&c) } - return NewState(NewDiff(lower, upper, c).Output()) + return lower.WithOutput(NewDiff(lower, upper, c).Output()) } diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go index 2b1d9bd3f1..0eed6774c2 100644 --- a/vendor/github.com/moby/buildkit/client/llb/exec.go +++ b/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -339,7 +339,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] inputIndex = pb.Empty } - outputIndex := pb.OutputIndex(-1) + outputIndex := pb.SkipOutput if !m.noOutput && !m.readonly && m.cacheID == "" && !m.tmpfs { outputIndex = pb.OutputIndex(outIndex) outIndex++ @@ -649,6 +649,7 @@ type SSHInfo struct { Optional bool } +// AddSecret is a RunOption that adds a secret to the exec. 
func AddSecret(dest string, opts ...SecretOption) RunOption { return runOptionFunc(func(ei *ExecInfo) { s := &SecretInfo{ID: dest, Target: dest, Mode: 0400} @@ -696,6 +697,7 @@ func SecretAsEnv(v bool) SecretOption { }) } +// SecretFileOpt sets the secret's target file uid, gid and permissions. func SecretFileOpt(uid, gid, mode int) SecretOption { return secretOptionFunc(func(si *SecretInfo) { si.UID = uid @@ -704,12 +706,15 @@ func SecretFileOpt(uid, gid, mode int) SecretOption { }) } +// ReadonlyRootFS sets the execs's root filesystem to be read-only. func ReadonlyRootFS() RunOption { return runOptionFunc(func(ei *ExecInfo) { ei.ReadonlyRootFS = true }) } +// WithProxy is a RunOption that sets the proxy environment variables in the resulting exec. +// For example `HTTP_PROXY` is a standard environment variable for unix systems that programs may read. func WithProxy(ps ProxyEnv) RunOption { return runOptionFunc(func(ei *ExecInfo) { ei.ProxyEnv = &ps diff --git a/vendor/github.com/moby/buildkit/client/llb/fileop.go b/vendor/github.com/moby/buildkit/client/llb/fileop.go index ffc6da19e4..7fc445c4c9 100644 --- a/vendor/github.com/moby/buildkit/client/llb/fileop.go +++ b/vendor/github.com/moby/buildkit/client/llb/fileop.go @@ -48,6 +48,7 @@ func NewFileOp(s State, action *FileAction, c Constraints) *FileOp { } // CopyInput is either llb.State or *FileActionWithState +// It is used by [Copy] to to specify the source of the copy operation. type CopyInput interface { isFileOpCopyInput() } @@ -60,6 +61,10 @@ type capAdder interface { addCaps(*FileOp) } +// FileAction is used to specify a file operation on a [State]. +// It can be used to create a directory, create a file, or remove a file, etc. 
+// This is used by [State.File] +// Typically a FileAction is created by calling one of the helper functions such as [Mkdir], [Copy], [Rm], [Mkfile] type FileAction struct { state *State prev *FileAction @@ -131,11 +136,16 @@ type fileActionWithState struct { func (fas *fileActionWithState) isFileOpCopyInput() {} +// Mkdir creates a FileAction which creates a directory at the given path. +// Example: +// +// llb.Scratch().File(llb.Mkdir("/foo", 0755)) func Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction { var mi MkdirInfo for _, o := range opt { o.SetMkdirOption(&mi) } + return &FileAction{ action: &fileActionMkdir{ file: p, @@ -181,6 +191,7 @@ func (fn mkdirOptionFunc) SetMkdirOption(mi *MkdirInfo) { var _ MkdirOption = &MkdirInfo{} +// WithParents is an option for Mkdir which creates parent directories if they do not exist. func WithParents(b bool) MkdirOption { return mkdirOptionFunc(func(mi *MkdirInfo) { mi.MakeParents = b @@ -282,6 +293,10 @@ func (up *UserOpt) marshal(base pb.InputIndex) *pb.UserOpt { return &pb.UserOpt{User: &pb.UserOpt_ByID{ByID: uint32(up.UID)}} } +// Mkfile creates a FileAction which creates a file at the given path with the provided contents. +// Example: +// +// llb.Scratch().File(llb.Mkfile("/foo", 0644, []byte("hello world!"))) func Mkfile(p string, m os.FileMode, dt []byte, opts ...MkfileOption) *FileAction { var mi MkfileInfo for _, o := range opts { @@ -332,6 +347,10 @@ func (a *fileActionMkfile) toProtoAction(ctx context.Context, parent string, bas }, nil } +// Rm creates a FileAction which removes a file or directory at the given path. 
+// Example: +// +// llb.Scratch().File(Mkfile("/foo", 0644, []byte("not around for long..."))).File(llb.Rm("/foo")) func Rm(p string, opts ...RmOption) *FileAction { var mi RmInfo for _, o := range opts { @@ -394,6 +413,25 @@ func (a *fileActionRm) toProtoAction(ctx context.Context, parent string, base pb }, nil } +// Copy produces a FileAction which copies a file or directory from the source to the destination. +// The "input" parameter is the contents to copy from. +// "src" is the path to copy from within the "input". +// "dest" is the path to copy to within the destination (the state being operated on). +// See [CopyInput] for the valid types of input. +// +// Example: +// +// st := llb.Local(".") +// llb.Scratch().File(llb.Copy(st, "/foo", "/bar")) +// +// The example copies the local (client) directory "./foo" to a new empty directory at /bar. +// +// Note: Copying directories can have different behavior based on if the destination exists or not. +// When the destination already exists, the contents of the source directory is copied underneath the destination, including the directory itself. +// You may need to supply a copy option to copy the dir contents only. +// You may also need to pass in a [CopyOption] which creates parent directories if they do not exist. +// +// See [CopyOption] for more details on what options are available. 
func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction { var state *State var fas *fileActionWithState @@ -410,7 +448,6 @@ func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction { for _, o := range opts { o.SetCopyOption(&mi) } - return &FileAction{ action: &fileActionCopy{ state: state, @@ -486,22 +523,19 @@ func (a *fileActionCopy) toProtoAction(ctx context.Context, parent string, base func (a *fileActionCopy) sourcePath(ctx context.Context) (string, error) { p := path.Clean(a.src) + dir := "/" + var err error if !path.IsAbs(p) { if a.state != nil { - dir, err := a.state.GetDir(ctx) - if err != nil { - return "", err - } - p = path.Join("/", dir, p) + dir, err = a.state.GetDir(ctx) } else if a.fas != nil { - dir, err := a.fas.state.GetDir(ctx) - if err != nil { - return "", err - } - p = path.Join("/", dir, p) + dir, err = a.fas.state.GetDir(ctx) + } + if err != nil { + return "", err } } - return p, nil + return path.Join(dir, p), nil } func (a *fileActionCopy) addCaps(f *FileOp) { @@ -691,6 +725,7 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } pop, md := MarshalConstraints(c, &f.constraints) + pop.Platform = nil // file op is not platform specific pop.Op = &pb.Op_File{ File: pfo, } @@ -702,7 +737,7 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] pop.Inputs = state.inputs for i, st := range state.actions { - output := pb.OutputIndex(-1) + output := pb.SkipOutput if i+1 == len(state.actions) { output = 0 } diff --git a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go index 6dd40b6943..8a3a629954 100644 --- a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go +++ b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go @@ -45,7 +45,6 @@ func New(with ...ImageMetaResolverOpt) llb.ImageMetaResolver { 
headers.Set("User-Agent", version.UserAgent()) return &imageMetaResolver{ resolver: docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, Headers: headers, }), platform: opts.platform, @@ -71,11 +70,12 @@ type imageMetaResolver struct { } type resolveResult struct { + ref string config []byte dgst digest.Digest } -func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) { +func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) { imr.locker.Lock(ref) defer imr.locker.Unlock(ref) @@ -87,16 +87,16 @@ func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string k := imr.key(ref, platform) if res, ok := imr.cache[k]; ok { - return res.dgst, res.config, nil + return res.ref, res.dgst, res.config, nil } - dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform) + ref, dgst, config, err := imageutil.Config(ctx, ref, imr.resolver, imr.buffer, nil, platform, opt.SourcePolicies) if err != nil { - return "", nil, err + return "", "", nil, err } - imr.cache[k] = resolveResult{dgst: dgst, config: config} - return dgst, config, nil + imr.cache[k] = resolveResult{dgst: dgst, config: config, ref: ref} + return ref, dgst, config, nil } func (imr *imageMetaResolver) key(ref string, platform *ocispecs.Platform) string { diff --git a/vendor/github.com/moby/buildkit/client/llb/merge.go b/vendor/github.com/moby/buildkit/client/llb/merge.go index 8177d71d2a..ee5f653642 100644 --- a/vendor/github.com/moby/buildkit/client/llb/merge.go +++ b/vendor/github.com/moby/buildkit/client/llb/merge.go @@ -70,6 +70,31 @@ func (m *MergeOp) Inputs() []Output { return m.inputs } +// Merge merges multiple states into a single state. 
This is useful in +// conjunction with [Diff] to create a set of patches which are independent of +// each other to a base state without affecting the cache of other merged +// states. +// As an example, let's say you have a rootfs with the following directories: +// +// / /bin /etc /opt /tmp +// +// Now let's say you want to copy a directory /etc/foo from one state and a +// binary /bin/bar from another state. +// [Copy] makes a duplicate of a file on top of another directory. +// Merge creates a directory whose contents are an overlay of 2 states on top of each other. +// +// With "Merge" you can do this: +// +// fooDiff := Diff(rootfs, fooState) +// barDiff := Diff(rootfs, barState) +// +// Then merge the results with: +// +// Merge(rootfs, fooDiff, barDiff) +// +// The resulting state will have both /etc/foo and /bin/bar, but because Merge +// was used, changing the contents of "fooDiff" does not require copying +// "barDiff" again. func Merge(inputs []State, opts ...ConstraintsOpt) State { // filter out any scratch inputs, which have no effect when merged var filteredInputs []State @@ -92,5 +117,5 @@ o.SetConstraintsOption(&c) } addCap(&c, pb.CapMergeOp) - return NewState(NewMerge(filteredInputs, c).Output()) + return filteredInputs[0].WithOutput(NewMerge(filteredInputs, c).Output()) } diff --git a/vendor/github.com/moby/buildkit/client/llb/meta.go b/vendor/github.com/moby/buildkit/client/llb/meta.go index b98b6d1063..ab1021bd65 100644 --- a/vendor/github.com/moby/buildkit/client/llb/meta.go +++ b/vendor/github.com/moby/buildkit/client/llb/meta.go @@ -10,6 +10,7 @@ import ( "github.com/google/shlex" "github.com/moby/buildkit/solver/pb" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) type contextKeyT string @@ -29,10 +30,15 @@ var ( keySecurity = contextKeyT("llb.security") ) +// AddEnvf is the same as [AddEnv] but allows for a format string.
+// This is the equivalent of `[State.AddEnvf]` func AddEnvf(key, value string, v ...interface{}) StateOption { return addEnvf(key, value, true, v...) } +// AddEnv returns a [StateOption] which adds an environment variable to the state. +// Use this with [State.With] to create a new state with the environment variable set. +// This is the equivalent of `[State.AddEnv]` func AddEnv(key, value string) StateOption { return addEnvf(key, value, false) } @@ -52,10 +58,14 @@ } } +// Dir returns a [StateOption] which sets the working directory for the state which will be used to resolve +// relative paths as well as the working directory for [State.Run]. +// See [State.With] for where to use this. func Dir(str string) StateOption { return dirf(str, false) } +// Dirf is the same as [Dir] but allows for a format string. func Dirf(str string, v ...interface{}) StateOption { return dirf(str, true, v...) } @@ -69,7 +79,7 @@ func dirf(value string, replace bool, v ...interface{}) StateOption { if !path.IsAbs(value) { prev, err := getDir(s)(ctx, c) if err != nil { - return nil, err + return nil, errors.Wrap(err, "getting dir from state") } if prev == "" { prev = "/" @@ -81,12 +91,18 @@ } } +// User returns a [StateOption] which sets the user for the state which will be used by [State.Run]. +// This is the equivalent of [State.User] +// See [State.With] for where to use this. func User(str string) StateOption { return func(s State) State { return s.WithValue(keyUser, str) } } +// Reset returns a [StateOption] which creates a new [State] with just the +// output of the current [State] and the provided [State] is set as the parent.
+// This is the equivalent of [State.Reset] func Reset(other State) StateOption { return func(s State) State { s = NewState(s.Output()) @@ -147,6 +163,9 @@ func getUser(s State) func(context.Context, *Constraints) (string, error) { } } +// Hostname returns a [StateOption] which sets the hostname used for containers created by [State.Run]. +// This is the equivalent of [State.Hostname] +// See [State.With] for where to use this. func Hostname(str string) StateOption { return func(s State) State { return s.WithValue(keyHostname, str) @@ -283,6 +302,9 @@ func getCgroupParent(s State) func(context.Context, *Constraints) (string, error } } +// Network returns a [StateOption] which sets the network mode used for containers created by [State.Run]. +// This is the equivalent of [State.Network] +// See [State.With] for where to use this. func Network(v pb.NetMode) StateOption { return func(s State) State { return s.WithValue(keyNetwork, v) @@ -302,6 +324,9 @@ func getNetwork(s State) func(context.Context, *Constraints) (pb.NetMode, error) } } +// Security returns a [StateOption] which sets the security mode used for containers created by [State.Run]. +// This is the equivalent of [State.Security] +// See [State.With] for where to use this. 
func Security(v pb.SecurityMode) StateOption { return func(s State) State { return s.WithValue(keySecurity, v) diff --git a/vendor/github.com/moby/buildkit/client/llb/resolver.go b/vendor/github.com/moby/buildkit/client/llb/resolver.go index b3b9cdf751..02644f62c7 100644 --- a/vendor/github.com/moby/buildkit/client/llb/resolver.go +++ b/vendor/github.com/moby/buildkit/client/llb/resolver.go @@ -3,6 +3,7 @@ package llb import ( "context" + spb "github.com/moby/buildkit/sourcepolicy/pb" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -31,7 +32,7 @@ func WithLayerLimit(l int) ImageOption { // ImageMetaResolver can resolve image config metadata from a reference type ImageMetaResolver interface { - ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error) + ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (string, digest.Digest, []byte, error) } type ResolverType int @@ -49,6 +50,8 @@ type ResolveImageConfigOpt struct { LogName string Store ResolveImageConfigOptStore + + SourcePolicies []*spb.Policy } type ResolveImageConfigOptStore struct { diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go index 27c8c1b617..fa1096a67c 100644 --- a/vendor/github.com/moby/buildkit/client/llb/source.go +++ b/vendor/github.com/moby/buildkit/client/llb/source.go @@ -91,6 +91,10 @@ func (s *SourceOp) Inputs() []Output { return nil } +// Image returns a state that represents a docker image in a registry. 
+// Example: +// +// st := llb.Image("busybox:latest") func Image(ref string, opts ...ImageOption) State { r, err := reference.ParseNormalizedNamed(ref) if err == nil { @@ -131,7 +135,7 @@ func Image(ref string, opts ...ImageOption) State { if p == nil { p = c.Platform } - _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{ + _, _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{ Platform: p, ResolveMode: info.resolveMode.String(), ResolverType: ResolverTypeRegistry, @@ -147,7 +151,7 @@ func Image(ref string, opts ...ImageOption) State { if p == nil { p = c.Platform } - dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{ + ref, dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{ Platform: p, ResolveMode: info.resolveMode.String(), ResolverType: ResolverTypeRegistry, @@ -155,6 +159,10 @@ func Image(ref string, opts ...ImageOption) State { if err != nil { return State{}, err } + r, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return State{}, err + } if dgst != "" { r, err = reference.WithDigest(r, dgst) if err != nil { @@ -215,6 +223,20 @@ type ImageInfo struct { RecordType string } +// Git returns a state that represents a git repository. +// Example: +// +// st := llb.Git("https://github.com/moby/buildkit.git#v0.11.6") +// +// The example fetches the v0.11.6 tag of the buildkit repository. +// You can also use a commit hash or a branch name. +// +// Other URL formats are supported such as "git@github.com:moby/buildkit.git", "git://...", "ssh://..." +// Formats that utilize SSH may need to supply credentials as a [GitOption]. +// You may need to check the source code for a full list of supported formats. +// +// By default the git repository is cloned with `--depth=1` to reduce the amount of data downloaded. 
+// Additionally the ".git" directory is removed after the clone; you can keep it with the [KeepGitDir] [GitOption]. func Git(remote, ref string, opts ...GitOption) State { url := strings.Split(remote, "#")[0] @@ -346,10 +368,12 @@ func MountSSHSock(sshID string) GitOption { }) } +// Scratch returns a state that represents an empty filesystem. func Scratch() State { return NewState(nil) } +// Local returns a state that represents a directory local to the client. func Local(name string, opts ...LocalOption) State { gi := &LocalInfo{} diff --git a/vendor/github.com/moby/buildkit/client/llb/sourcemap.go b/vendor/github.com/moby/buildkit/client/llb/sourcemap.go index 17cc1de6f5..721db3cebe 100644 --- a/vendor/github.com/moby/buildkit/client/llb/sourcemap.go +++ b/vendor/github.com/moby/buildkit/client/llb/sourcemap.go @@ -7,17 +7,30 @@ import ( digest "github.com/opencontainers/go-digest" ) +// SourceMap maps a source file/location to an LLB state/definition. +// SourceMaps are used to provide information for debugging and helpful error messages to the user. +// As an example, let's say you have a Dockerfile with the following content: +// +// FROM alpine +// RUN exit 1 +// +// When the "RUN" statement exits with a non-zero exit code buildkit will treat +// it as an error and is able to provide the user with a helpful error message +// pointing to exactly the line in the Dockerfile that caused the error.
type SourceMap struct { State *State Definition *Definition Filename string - Data []byte + // Language should use names defined in https://github.com/github/linguist/blob/v7.24.1/lib/linguist/languages.yml + Language string + Data []byte } -func NewSourceMap(st *State, filename string, dt []byte) *SourceMap { +func NewSourceMap(st *State, filename string, lang string, dt []byte) *SourceMap { return &SourceMap{ State: st, Filename: filename, + Language: lang, Data: dt, } } @@ -82,6 +95,7 @@ func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt info := &pb.SourceInfo{ Data: m.Data, Filename: m.Filename, + Language: m.Language, } if def != nil { diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go index 7d35f3be59..f15fad87ab 100644 --- a/vendor/github.com/moby/buildkit/client/llb/state.go +++ b/vendor/github.com/moby/buildkit/client/llb/state.go @@ -49,6 +49,12 @@ func NewState(o Output) State { return s } +// State represents all operations that must be done to produce a given output. +// States are immutable, and all operations return a new state linked to the previous one. +// State is the core type of the LLB API and is used to build a graph of operations. +// The graph is then marshaled into a definition that can be executed by a backend (such as buildkitd). +// +// Operations performed on a State are executed lazily after the entire state graph is marshalled and sent to the backend. type State struct { out Output prev *State @@ -123,6 +129,7 @@ func (s State) SetMarshalDefaults(co ...ConstraintsOpt) State { return s } +// Marshal marshals the state and all its parents into a [Definition]. func (s State) Marshal(ctx context.Context, co ...ConstraintsOpt) (*Definition, error) { c := NewConstraints(append(s.opts, co...)...) 
def := &Definition{ @@ -208,10 +215,13 @@ return def, nil } +// Validate validates the state. +// This validation, unlike most other operations on [State], is not lazily performed. func (s State) Validate(ctx context.Context, c *Constraints) error { return s.Output().Vertex(ctx, c).Validate(ctx, c) } +// Output returns the output of the state. func (s State) Output() Output { if s.async != nil { return s.async.Output() @@ -219,6 +229,7 @@ return s.out } +// WithOutput creates a new state with the output set to the given output. func (s State) WithOutput(o Output) State { prev := s s = State{ @@ -229,6 +240,7 @@ return s } +// WithImageConfig adds the environment variables, working directory, and platform specified in the image config to the state. func (s State) WithImageConfig(c []byte) (State, error) { var img ocispecs.Image if err := json.Unmarshal(c, &img); err != nil { @@ -255,6 +267,12 @@ return s, nil } +// Run performs the command specified by the arguments within the context of the current [State]. +// The command is executed as a container with the [State]'s filesystem as the root filesystem. +// As such any command you run must be present in the [State]'s filesystem. +// Constraints such as [State.Ulimit], [State.ParentCgroup], [State.Network], etc. are applied to the container. +// +// Run is useful when none of the LLB ops are sufficient for the operation that you want to perform. func (s State) Run(ro ...RunOption) ExecState { ei := &ExecInfo{State: s} for _, o := range ro { @@ -273,6 +291,8 @@ } } +// File performs a file operation on the current state. +// See [FileAction] for details on the operations that can be performed.
func (s State) File(a *FileAction, opts ...ConstraintsOpt) State { var c Constraints for _, o := range opts { @@ -282,21 +302,29 @@ return s.WithOutput(NewFileOp(s, a, c).Output()) } +// AddEnv returns a new [State] with the provided environment variable set. +// See [AddEnv] func (s State) AddEnv(key, value string) State { return AddEnv(key, value)(s) } +// AddEnvf is the same as [State.AddEnv] but with a format string. func (s State) AddEnvf(key, value string, v ...interface{}) State { return AddEnvf(key, value, v...)(s) } +// Dir returns a new [State] with the provided working directory set. +// See [Dir] func (s State) Dir(str string) State { return Dir(str)(s) } + +// Dirf is the same as [State.Dir] but with a format string. func (s State) Dirf(str string, v ...interface{}) State { return Dirf(str, v...)(s) } +// GetEnv returns the value of the environment variable with the provided key. func (s State) GetEnv(ctx context.Context, key string, co ...ConstraintsOpt) (string, bool, error) { c := &Constraints{} for _, f := range co { @@ -310,6 +338,8 @@ return v, ok, nil } +// Env returns all environment variables for the state. func (s State) Env(ctx context.Context, co ...ConstraintsOpt) ([]string, error) { c := &Constraints{} for _, f := range co { @@ -322,6 +352,7 @@ return env.ToArray(), nil } +// GetDir returns the current working directory for the state.
func (s State) GetDir(ctx context.Context, co ...ConstraintsOpt) (string, error) { c := &Constraints{} for _, f := range co { @@ -338,18 +369,28 @@ func (s State) GetArgs(ctx context.Context, co ...ConstraintsOpt) ([]string, err return getArgs(s)(ctx, c) } +// Reset is used to return a new [State] with all of the current state and the +// provided [State] as the parent. In effect you can think of this as creating +// a new state with all the output from the current state but reparented to the +// provided state. See [Reset] for more details. func (s State) Reset(s2 State) State { return Reset(s2)(s) } +// User sets the user for this state. +// See [User] for more details. func (s State) User(v string) State { return User(v)(s) } +// Hostname sets the hostname for this state. +// See [Hostname] for more details. func (s State) Hostname(v string) State { return Hostname(v)(s) } +// GetHostname returns the hostname set on the state. +// See [Hostname] for more details. func (s State) GetHostname(ctx context.Context, co ...ConstraintsOpt) (string, error) { c := &Constraints{} for _, f := range co { @@ -358,10 +399,14 @@ func (s State) GetHostname(ctx context.Context, co ...ConstraintsOpt) (string, e return getHostname(s)(ctx, c) } +// Platform sets the platform for the state. Platforms are used to determine +// image variants to pull and run as well as the platform metadata to set on the +// image config. func (s State) Platform(p ocispecs.Platform) State { return platform(p)(s) } +// GetPlatform returns the platform for the state. func (s State) GetPlatform(ctx context.Context, co ...ConstraintsOpt) (*ocispecs.Platform, error) { c := &Constraints{} for _, f := range co { @@ -370,10 +415,14 @@ func (s State) GetPlatform(ctx context.Context, co ...ConstraintsOpt) (*ocispecs return getPlatform(s)(ctx, c) } +// Network sets the network mode for the state. +// Network modes are used by [State.Run] to determine the network mode used when running the container. 
+// Network modes are not applied to image configs. func (s State) Network(n pb.NetMode) State { return Network(n)(s) } +// GetNetwork returns the network mode for the state. func (s State) GetNetwork(ctx context.Context, co ...ConstraintsOpt) (pb.NetMode, error) { c := &Constraints{} for _, f := range co { @@ -381,10 +430,15 @@ } return getNetwork(s)(ctx, c) } + +// Security sets the security mode for the state. +// Security modes are used by [State.Run] to set the privileges that processes in the container will run with. +// Security modes are not applied to image configs. func (s State) Security(n pb.SecurityMode) State { return Security(n)(s) } +// GetSecurity returns the security mode for the state. func (s State) GetSecurity(ctx context.Context, co ...ConstraintsOpt) (pb.SecurityMode, error) { c := &Constraints{} for _, f := range co { @@ -393,6 +447,8 @@ return getSecurity(s)(ctx, c) } +// With applies [StateOption]s to the [State]. +// Each applied [StateOption] creates a new [State] object with the previous as its parent. func (s State) With(so ...StateOption) State { for _, o := range so { s = o(s) @@ -400,14 +456,23 @@ return s } +// AddExtraHost adds a host name to IP mapping to any containers created from this state. func (s State) AddExtraHost(host string, ip net.IP) State { return extraHost(host, ip)(s) } +// AddUlimit sets the hard/soft limits for the given ulimit. +// The ulimit is applied to containers created from this state. +// Ulimits are Linux specific and only apply to containers created from this state such as via `[State.Run]` +// Ulimits do not apply to image configs.
func (s State) AddUlimit(name UlimitName, soft int64, hard int64) State { return ulimit(name, soft, hard)(s) } +// WithCgroupParent sets the parent cgroup for any containers created from this state. +// This is useful when you want to apply resource constraints to a group of containers. +// Cgroups are Linux specific and only applies to containers created from this state such as via `[State.Run]` +// Cgroups do not apply to image configs. func (s State) WithCgroupParent(cp string) State { return cgroupParent(cp)(s) } diff --git a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go index 3731ff36bb..156976f5dd 100644 --- a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go +++ b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go @@ -20,15 +20,18 @@ const ( ) type StoreIndex struct { - indexPath string - lockPath string + indexPath string + lockPath string + layoutPath string } func NewStoreIndex(storePath string) StoreIndex { indexPath := path.Join(storePath, indexFile) + layoutPath := path.Join(storePath, ocispecs.ImageLayoutFile) return StoreIndex{ - indexPath: indexPath, - lockPath: indexPath + lockFileSuffix, + indexPath: indexPath, + lockPath: indexPath + lockFileSuffix, + layoutPath: layoutPath, } } @@ -58,6 +61,7 @@ func (s StoreIndex) Read() (*ocispecs.Index, error) { } func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error { + // lock the store to prevent concurrent access lock := flock.New(s.lockPath) locked, err := lock.TryLock() if err != nil { @@ -71,20 +75,33 @@ func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error { os.RemoveAll(s.lockPath) }() - f, err := os.OpenFile(s.indexPath, os.O_RDWR|os.O_CREATE, 0644) + // create the oci-layout file + layout := ocispecs.ImageLayout{ + Version: ocispecs.ImageLayoutVersion, + } + layoutData, err := json.Marshal(layout) + if err != nil { + return err + } + if err := os.WriteFile(s.layoutPath, 
layoutData, 0644); err != nil { + return err + } + + // modify the index file + idxFile, err := os.OpenFile(s.indexPath, os.O_RDWR|os.O_CREATE, 0644) if err != nil { return errors.Wrapf(err, "could not open %s", s.indexPath) } - defer f.Close() + defer idxFile.Close() var idx ocispecs.Index - b, err := io.ReadAll(f) + idxData, err := io.ReadAll(idxFile) if err != nil { return errors.Wrapf(err, "could not read %s", s.indexPath) } - if len(b) > 0 { - if err := json.Unmarshal(b, &idx); err != nil { - return errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b)) + if len(idxData) > 0 { + if err := json.Unmarshal(idxData, &idx); err != nil { + return errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(idxData)) } } @@ -92,15 +109,15 @@ func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error { return err } - b, err = json.Marshal(idx) + idxData, err = json.Marshal(idx) if err != nil { return err } - if _, err = f.WriteAt(b, 0); err != nil { - return err + if _, err = idxFile.WriteAt(idxData, 0); err != nil { + return errors.Wrapf(err, "could not write %s", s.indexPath) } - if err = f.Truncate(int64(len(b))); err != nil { - return err + if err = idxFile.Truncate(int64(len(idxData))); err != nil { + return errors.Wrapf(err, "could not truncate %s", s.indexPath) } return nil } diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go index 65183d61cd..22ff2031d4 100644 --- a/vendor/github.com/moby/buildkit/client/solve.go +++ b/vendor/github.com/moby/buildkit/client/solve.go @@ -169,7 +169,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } if supportFile && supportDir { - return nil, errors.Errorf("both file and directory output is not support by %s exporter", ex.Type) + return nil, errors.Errorf("both file and directory output is not supported by %s exporter", ex.Type) } if !supportFile && ex.Output != nil { return nil, 
errors.Errorf("output file writer is not supported by %s exporter", ex.Type) diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go index 1734d5e156..a92588e53f 100644 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go @@ -7,6 +7,7 @@ import ( // Config provides containerd configuration data for the server type Config struct { Debug bool `toml:"debug"` + Trace bool `toml:"trace"` // Root is the path to a directory where buildkit will store persistent data Root string `toml:"root"` @@ -47,7 +48,7 @@ type TLSConfig struct { type GCConfig struct { GC *bool `toml:"gc"` - GCKeepStorage int64 `toml:"gckeepstorage"` + GCKeepStorage DiskSpace `toml:"gckeepstorage"` GCPolicy []GCPolicy `toml:"gcpolicy"` } @@ -114,10 +115,10 @@ type ContainerdConfig struct { } type GCPolicy struct { - All bool `toml:"all"` - KeepBytes int64 `toml:"keepBytes"` - KeepDuration int64 `toml:"keepDuration"` - Filters []string `toml:"filters"` + All bool `toml:"all"` + KeepBytes DiskSpace `toml:"keepBytes"` + KeepDuration Duration `toml:"keepDuration"` + Filters []string `toml:"filters"` } type DNSConfig struct { @@ -127,6 +128,6 @@ type DNSConfig struct { } type HistoryConfig struct { - MaxAge int64 `toml:"maxAge"` - MaxEntries int64 `toml:"maxEntries"` + MaxAge Duration `toml:"maxAge"` + MaxEntries int64 `toml:"maxEntries"` } diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go index 6f3f197893..4078cc6d59 100644 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go @@ -1,21 +1,86 @@ package config +import ( + "encoding" + "strconv" + "strings" + "time" + + "github.com/docker/go-units" + "github.com/pkg/errors" +) + +type Duration struct { + 
time.Duration +} + +func (d *Duration) UnmarshalText(textb []byte) error { + text := stripQuotes(string(textb)) + if len(text) == 0 { + return nil + } + + if duration, err := time.ParseDuration(text); err == nil { + d.Duration = duration + return nil + } + + if i, err := strconv.ParseInt(text, 10, 64); err == nil { + d.Duration = time.Duration(i) * time.Second + return nil + } + + return errors.Errorf("invalid duration %s", text) +} + +var _ encoding.TextUnmarshaler = &Duration{} + +type DiskSpace struct { + Bytes int64 + Percentage int64 +} + +var _ encoding.TextUnmarshaler = &DiskSpace{} + +func (d *DiskSpace) UnmarshalText(textb []byte) error { + text := stripQuotes(string(textb)) + if len(text) == 0 { + return nil + } + + if text2 := strings.TrimSuffix(text, "%"); len(text2) < len(text) { + i, err := strconv.ParseInt(text2, 10, 64) + if err != nil { + return err + } + d.Percentage = i + return nil + } + + if i, err := units.RAMInBytes(text); err == nil { + d.Bytes = i + return nil + } + + return errors.Errorf("invalid disk space %s", text) +} + const defaultCap int64 = 2e9 // 2GB -func DefaultGCPolicy(p string, keep int64) []GCPolicy { - if keep == 0 { - keep = DetectDefaultGCCap(p) +func DefaultGCPolicy(keep DiskSpace) []GCPolicy { + if keep == (DiskSpace{}) { + keep = DetectDefaultGCCap() } return []GCPolicy{ // if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days { Filters: []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"}, - KeepDuration: 48 * 3600, // 48h - KeepBytes: 512 * 1e6, // 512MB + KeepDuration: Duration{Duration: time.Duration(48) * time.Hour}, // 48h + KeepBytes: DiskSpace{Bytes: 512 * 1e6}, // 512MB }, // remove any data not used for 60 days { - KeepDuration: 60 * 24 * 3600, // 60d + KeepDuration: Duration{Duration: time.Duration(60) * 24 * time.Hour}, // 60d KeepBytes: keep, }, // keep the unshared build cache under cap @@ -29,3 +94,13 @@ func 
DefaultGCPolicy(p string, keep int64) []GCPolicy { }, } } + +func stripQuotes(s string) string { + if len(s) == 0 { + return s + } + if s[0] == '"' && s[len(s)-1] == '"' { + return s[1 : len(s)-1] + } + return s +} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go index a2efe6f568..232a9ac336 100644 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go @@ -7,12 +7,23 @@ import ( "syscall" ) -func DetectDefaultGCCap(root string) int64 { +func DetectDefaultGCCap() DiskSpace { + return DiskSpace{Percentage: 10} +} + +func (d DiskSpace) AsBytes(root string) int64 { + if d.Bytes != 0 { + return d.Bytes + } + if d.Percentage == 0 { + return 0 + } + var st syscall.Statfs_t if err := syscall.Statfs(root, &st); err != nil { return defaultCap } diskSize := int64(st.Bsize) * int64(st.Blocks) - avail := diskSize / 10 + avail := diskSize * d.Percentage / 100 return (avail/(1<<30) + 1) * 1e9 // round up } diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go index 349fddbd51..55ce4dd772 100644 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go @@ -3,6 +3,10 @@ package config -func DetectDefaultGCCap(root string) int64 { - return defaultCap +func DetectDefaultGCCap() DiskSpace { + return DiskSpace{Bytes: defaultCap} +} + +func (d DiskSpace) AsBytes(root string) int64 { + return d.Bytes } diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go index 2bd06db257..8a09b8ace3 100644 --- a/vendor/github.com/moby/buildkit/control/control.go +++ b/vendor/github.com/moby/buildkit/control/control.go @@ -10,7 +10,6 @@ 
import ( contentapi "github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/containerd/content" - "github.com/containerd/containerd/leases" "github.com/containerd/containerd/services/content/contentserver" "github.com/docker/distribution/reference" "github.com/mitchellh/hashstructure/v2" @@ -18,20 +17,24 @@ import ( apitypes "github.com/moby/buildkit/api/types" "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client" + "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/cmd/buildkitd/config" controlgateway "github.com/moby/buildkit/control/gateway" "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/frontend/attestations" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/grpchijack" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver" "github.com/moby/buildkit/solver/llbsolver/proc" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/throttle" "github.com/moby/buildkit/util/tracing/transform" "github.com/moby/buildkit/version" @@ -52,14 +55,14 @@ type Opt struct { SessionManager *session.Manager WorkerController *worker.Controller Frontends map[string]frontend.Frontend - CacheKeyStorage solver.CacheKeyStorage + CacheManager solver.CacheManager ResolveCacheExporterFuncs map[string]remotecache.ResolveCacheExporterFunc ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc Entitlements []string TraceCollector sdktrace.SpanExporter HistoryDB *bbolt.DB - LeaseManager leases.Manager - ContentStore content.Store + LeaseManager *leaseutil.Manager + ContentStore *containerdsnapshot.Store 
HistoryConfig *config.HistoryConfig } @@ -77,21 +80,22 @@ type Controller struct { // TODO: ControlService } func NewController(opt Opt) (*Controller, error) { - cache := solver.NewCacheManager(context.TODO(), "local", opt.CacheKeyStorage, worker.NewCacheResultStorage(opt.WorkerController)) - gatewayForwarder := controlgateway.NewGatewayForwarder() - hq := llbsolver.NewHistoryQueue(llbsolver.HistoryQueueOpt{ + hq, err := llbsolver.NewHistoryQueue(llbsolver.HistoryQueueOpt{ DB: opt.HistoryDB, LeaseManager: opt.LeaseManager, ContentStore: opt.ContentStore, CleanConfig: opt.HistoryConfig, }) + if err != nil { + return nil, errors.Wrap(err, "failed to create history queue") + } s, err := llbsolver.New(llbsolver.Opt{ WorkerController: opt.WorkerController, Frontends: opt.Frontends, - CacheManager: cache, + CacheManager: opt.CacheManager, CacheResolvers: opt.ResolveCacheImporterFuncs, GatewayForwarder: gatewayForwarder, SessionManager: opt.SessionManager, @@ -106,7 +110,7 @@ func NewController(opt Opt) (*Controller, error) { opt: opt, solver: s, history: hq, - cache: cache, + cache: opt.CacheManager, gatewayForwarder: gatewayForwarder, } c.throttledGC = throttle.After(time.Minute, c.gc) @@ -127,7 +131,7 @@ func (c *Controller) Register(server *grpc.Server) { c.gatewayForwarder.Register(server) tracev1.RegisterTraceServiceServer(server, c) - store := &roContentStore{c.opt.ContentStore} + store := &roContentStore{c.opt.ContentStore.WithFallbackNS(c.opt.ContentStore.Namespace() + "_history")} contentapi.RegisterContentServer(server, contentserver.New(store)) } @@ -170,7 +174,7 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr imageutil.CancelCacheLeases() } - ch := make(chan client.UsageInfo) + ch := make(chan client.UsageInfo, 32) eg, ctx := errgroup.WithContext(stream.Context()) workers, err := c.opt.WorkerController.List() @@ -182,9 +186,9 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr defer func() { 
if didPrune { if c, ok := c.cache.(interface { - ReleaseUnreferenced() error + ReleaseUnreferenced(context.Context) error }); ok { - if err := c.ReleaseUnreferenced(); err != nil { + if err := c.ReleaseUnreferenced(ctx); err != nil { bklog.G(ctx).Errorf("failed to release cache metadata: %+v", err) } } @@ -212,6 +216,11 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr }) eg2.Go(func() error { + defer func() { + // drain channel on error + for range ch { + } + }() for r := range ch { didPrune = true if err := stream.Send(&controlapi.UsageRecord{ @@ -276,7 +285,7 @@ func (c *Controller) UpdateBuildHistory(ctx context.Context, req *controlapi.Upd return &controlapi.UpdateBuildHistoryResponse{}, err } -func translateLegacySolveRequest(req *controlapi.SolveRequest) error { +func translateLegacySolveRequest(req *controlapi.SolveRequest) { // translates ExportRef and ExportAttrs to new Exports (v0.4.0) if legacyExportRef := req.Cache.ExportRefDeprecated; legacyExportRef != "" { ex := &controlapi.CacheOptionsEntry{ @@ -302,18 +311,13 @@ func translateLegacySolveRequest(req *controlapi.SolveRequest) error { req.Cache.Imports = append(req.Cache.Imports, im) } req.Cache.ImportRefsDeprecated = nil - return nil } func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) { atomic.AddInt64(&c.buildCount, 1) defer atomic.AddInt64(&c.buildCount, -1) - // This method registers job ID in solver.Solve. Make sure there are no blocking calls before that might delay this. 
- - if err := translateLegacySolveRequest(req); err != nil { - return nil, err - } + translateLegacySolveRequest(req) defer func() { time.AfterFunc(time.Second, c.throttledGC) @@ -329,20 +333,11 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* // if SOURCE_DATE_EPOCH is set, enable it for the exporter if v, ok := epoch.ParseBuildArgs(req.FrontendAttrs); ok { - if _, ok := req.ExporterAttrs[epoch.KeySourceDateEpoch]; !ok { + if _, ok := req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)]; !ok { if req.ExporterAttrs == nil { req.ExporterAttrs = make(map[string]string) } - req.ExporterAttrs[epoch.KeySourceDateEpoch] = v - } - } - - if v, ok := req.FrontendAttrs["build-arg:BUILDKIT_BUILDINFO"]; ok && v != "" { - if _, ok := req.ExporterAttrs["buildinfo"]; !ok { - if req.ExporterAttrs == nil { - req.ExporterAttrs = make(map[string]string) - } - req.ExporterAttrs["buildinfo"] = v + req.ExporterAttrs[string(exptypes.OptKeySourceDateEpoch)] = v } } @@ -377,6 +372,10 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* if err != nil { return nil, errors.Wrapf(err, "failed to configure %v cache exporter", e.Type) } + if exp.Exporter == nil { + bklog.G(ctx).Debugf("cache exporter resolver for %v returned nil, skipping exporter", e.Type) + continue + } if exportMode, supported := parseCacheExportMode(e.Attrs["mode"]); !supported { bklog.G(ctx).Debugf("skipping invalid cache export mode: %s", e.Attrs["mode"]) } else { @@ -416,14 +415,19 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* if err != nil { return nil, errors.Wrapf(err, "failed to parse sbom generator %s", src) } + ref = reference.TagNameOnly(ref) useCache := true if v, ok := req.FrontendAttrs["no-cache"]; ok && v == "" { // disable cache if cache is disabled for all stages useCache = false } - ref = reference.TagNameOnly(ref) - procs = append(procs, proc.SBOMProcessor(ref.String(), useCache)) + resolveMode := 
llb.ResolveModeDefault.String() + if v, ok := req.FrontendAttrs["image-resolve-mode"]; ok { + resolveMode = v + } + + procs = append(procs, proc.SBOMProcessor(ref.String(), useCache, resolveMode)) } if attrs, ok := attests["provenance"]; ok { @@ -462,6 +466,11 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con }) eg.Go(func() error { + defer func() { + // drain channel on error + for range ch { + } + }() for { ss, ok := <-ch if !ok { diff --git a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go index ac195c4315..fa578c6d48 100644 --- a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go @@ -21,6 +21,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" + resourcestypes "github.com/moby/buildkit/executor/resources/types" gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/snapshot" @@ -78,7 +79,7 @@ func New(client *containerd.Client, root, cgroup string, networkProviders map[pb } } -func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { +func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) { if id == "" { id = identity.NewID() } @@ -105,12 +106,12 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig) if err != nil { - return err + return nil, err } hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, 
meta.Hostname) if err != nil { - return err + return nil, err } if clean != nil { defer clean() @@ -118,12 +119,12 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M mountable, err := root.Src.Mount(ctx, false) if err != nil { - return err + return nil, err } rootMounts, release, err := mountable.Mount() if err != nil { - return err + return nil, err } if release != nil { defer release() @@ -132,14 +133,14 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M lm := snapshot.LocalMounterWithMounts(rootMounts) rootfsPath, err := lm.Mount() if err != nil { - return err + return nil, err } defer lm.Unmount() - defer executor.MountStubsCleaner(rootfsPath, mounts, meta.RemoveMountStubsRecursive)() + defer executor.MountStubsCleaner(ctx, rootfsPath, mounts, meta.RemoveMountStubsRecursive)() uid, gid, sgids, err := oci.GetUser(rootfsPath, meta.User) if err != nil { - return err + return nil, err } identity := idtools.Identity{ @@ -149,21 +150,21 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M newp, err := fs.RootPath(rootfsPath, meta.Cwd) if err != nil { - return errors.Wrapf(err, "working dir %s points to invalid target", newp) + return nil, errors.Wrapf(err, "working dir %s points to invalid target", newp) } if _, err := os.Stat(newp); err != nil { if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil { - return errors.Wrapf(err, "failed to create working directory %s", newp) + return nil, errors.Wrapf(err, "failed to create working directory %s", newp) } } provider, ok := w.networkProviders[meta.NetMode] if !ok { - return errors.Errorf("unknown network mode %s", meta.NetMode) + return nil, errors.Errorf("unknown network mode %s", meta.NetMode) } namespace, err := provider.New(ctx, meta.Hostname) if err != nil { - return err + return nil, err } defer namespace.Close() @@ -179,13 +180,13 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root 
executor.M processMode := oci.ProcessSandbox // FIXME(AkihiroSuda) spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...) if err != nil { - return err + return nil, err } defer cleanup() spec.Process.Terminal = meta.Tty if w.rootless { if err := rootlessspecconv.ToRootless(spec); err != nil { - return err + return nil, err } } @@ -193,7 +194,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M containerd.WithSpec(spec), ) if err != nil { - return err + return nil, err } defer func() { @@ -214,7 +215,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M Options: []string{"rbind"}, }})) if err != nil { - return err + return nil, err } defer func() { @@ -225,7 +226,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M if nn, ok := namespace.(OnCreateRuntimer); ok { if err := nn.OnCreateRuntime(task.Pid()); err != nil { - return err + return nil, err } } @@ -238,7 +239,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M } }) }) - return err + return nil, err } func (w *containerdExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) (err error) { diff --git a/vendor/github.com/moby/buildkit/executor/executor.go b/vendor/github.com/moby/buildkit/executor/executor.go index a323bcc9cc..741f347cd9 100644 --- a/vendor/github.com/moby/buildkit/executor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/executor.go @@ -6,6 +6,7 @@ import ( "net" "syscall" + resourcestypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/pb" ) @@ -55,7 +56,7 @@ type Executor interface { // Run will start a container for the given process with rootfs, mounts. 
// `id` is an optional name for the container so it can be referenced later via Exec. // `started` is an optional channel that will be closed when the container setup completes and has started running. - Run(ctx context.Context, id string, rootfs Mount, mounts []Mount, process ProcessInfo, started chan<- struct{}) error + Run(ctx context.Context, id string, rootfs Mount, mounts []Mount, process ProcessInfo, started chan<- struct{}) (resourcestypes.Recorder, error) // Exec will start a process in container matching `id`. An error will be returned // if the container failed to start (via Run) or has exited before Exec is called. Exec(ctx context.Context, id string, process ProcessInfo) error diff --git a/vendor/github.com/moby/buildkit/executor/oci/hosts.go b/vendor/github.com/moby/buildkit/executor/oci/hosts.go index 0d193555c9..0de29f8a8d 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/hosts.go +++ b/vendor/github.com/moby/buildkit/executor/oci/hosts.go @@ -20,9 +20,9 @@ func GetHostsFile(ctx context.Context, stateDir string, extraHosts []executor.Ho return makeHostsFile(stateDir, extraHosts, idmap, hostname) } - _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { + _, err := g.Do(ctx, stateDir, func(ctx context.Context) (struct{}, error) { _, _, err := makeHostsFile(stateDir, nil, idmap, hostname) - return nil, err + return struct{}{}, err }) if err != nil { return "", nil, err diff --git a/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go index 3ac0feda7a..9db0b3dfaa 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go +++ b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go @@ -11,12 +11,12 @@ import ( "github.com/pkg/errors" ) -var g flightcontrol.Group +var g flightcontrol.Group[struct{}] var notFirstRun bool var lastNotEmpty bool // overridden by tests -var resolvconfGet = resolvconf.Get +var resolvconfPath = resolvconf.Path 
type DNSConfig struct { Nameservers []string @@ -26,7 +26,7 @@ type DNSConfig struct { func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.IdentityMapping, dns *DNSConfig) (string, error) { p := filepath.Join(stateDir, "resolv.conf") - _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { + _, err := g.Do(ctx, stateDir, func(ctx context.Context) (struct{}, error) { generate := !notFirstRun notFirstRun = true @@ -34,15 +34,15 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity fi, err := os.Stat(p) if err != nil { if !errors.Is(err, os.ErrNotExist) { - return "", err + return struct{}{}, err } generate = true } if !generate { - fiMain, err := os.Stat(resolvconf.Path()) + fiMain, err := os.Stat(resolvconfPath()) if err != nil { if !errors.Is(err, os.ErrNotExist) { - return nil, err + return struct{}{}, err } if lastNotEmpty { generate = true @@ -57,63 +57,59 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity } if !generate { - return "", nil + return struct{}{}, nil } - var dt []byte - f, err := resolvconfGet() - if err != nil { - if !errors.Is(err, os.ErrNotExist) { - return "", err - } - } else { - dt = f.Content + dt, err := os.ReadFile(resolvconfPath()) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return struct{}{}, err } + var f *resolvconf.File + tmpPath := p + ".tmp" if dns != nil { var ( - dnsNameservers = resolvconf.GetNameservers(dt, resolvconf.IP) - dnsSearchDomains = resolvconf.GetSearchDomains(dt) - dnsOptions = resolvconf.GetOptions(dt) - ) - if len(dns.Nameservers) > 0 { - dnsNameservers = dns.Nameservers - } - if len(dns.SearchDomains) > 0 { + dnsNameservers = dns.Nameservers dnsSearchDomains = dns.SearchDomains + dnsOptions = dns.Options + ) + if len(dns.Nameservers) == 0 { + dnsNameservers = resolvconf.GetNameservers(dt, resolvconf.IP) } - if len(dns.Options) > 0 { - dnsOptions = dns.Options + if len(dns.SearchDomains) == 0 { + 
dnsSearchDomains = resolvconf.GetSearchDomains(dt) + } + if len(dns.Options) == 0 { + dnsOptions = resolvconf.GetOptions(dt) } - f, err = resolvconf.Build(p+".tmp", dnsNameservers, dnsSearchDomains, dnsOptions) + f, err = resolvconf.Build(tmpPath, dnsNameservers, dnsSearchDomains, dnsOptions) if err != nil { - return "", err + return struct{}{}, err } dt = f.Content } f, err = resolvconf.FilterResolvDNS(dt, true) if err != nil { - return "", err + return struct{}{}, err } - tmpPath := p + ".tmp" if err := os.WriteFile(tmpPath, f.Content, 0644); err != nil { - return "", err + return struct{}{}, err } if idmap != nil { root := idmap.RootPair() if err := os.Chown(tmpPath, root.UID, root.GID); err != nil { - return "", err + return struct{}{}, err } } if err := os.Rename(tmpPath, p); err != nil { - return "", err + return struct{}{}, err } - return "", nil + return struct{}{}, nil }) if err != nil { return "", err diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec.go b/vendor/github.com/moby/buildkit/executor/oci/spec.go index f825b1dce7..c6d665b081 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec.go @@ -37,6 +37,12 @@ const ( NoProcessSandbox ) +var tracingEnvVars = []string{ + "OTEL_TRACES_EXPORTER=otlp", + "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=" + getTracingSocket(), + "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=grpc", +} + func (pm ProcessMode) String() string { switch pm { case ProcessSandbox: @@ -114,7 +120,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou if tracingSocket != "" { // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md - meta.Env = append(meta.Env, "OTEL_TRACES_EXPORTER=otlp", "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=unix:///dev/otel-grpc.sock", "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=grpc") + meta.Env = append(meta.Env, tracingEnvVars...) 
meta.Env = append(meta.Env, traceexec.Environ(ctx)...) } @@ -131,6 +137,12 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou return nil, nil, err } + if cgroupNamespaceSupported() { + s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{ + Type: specs.CgroupNamespace, + }) + } + if len(meta.Ulimit) == 0 { // reset open files limit s.Process.Rlimits = nil @@ -185,12 +197,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou } if tracingSocket != "" { - s.Mounts = append(s.Mounts, specs.Mount{ - Destination: "/dev/otel-grpc.sock", - Type: "bind", - Source: tracingSocket, - Options: []string{"ro", "rbind"}, - }) + s.Mounts = append(s.Mounts, getTracingSocketMount(tracingSocket)) } s.Mounts = dedupMounts(s.Mounts) diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go index f906f79b6b..97e95e9834 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go @@ -6,7 +6,9 @@ package oci import ( "context" "fmt" + "os" "strings" + "sync" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" @@ -21,6 +23,15 @@ import ( "github.com/pkg/errors" ) +var ( + cgroupNSOnce sync.Once + supportsCgroupNS bool +) + +const ( + tracingSocketPath = "/dev/otel-grpc.sock" +) + func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) { return []oci.SpecOpts{ // https://github.com/moby/buildkit/issues/429 @@ -122,3 +133,25 @@ func withDefaultProfile() oci.SpecOpts { return err } } + +func getTracingSocketMount(socket string) specs.Mount { + return specs.Mount{ + Destination: tracingSocketPath, + Type: "bind", + Source: socket, + Options: []string{"ro", "rbind"}, + } +} + +func getTracingSocket() string { + return fmt.Sprintf("unix://%s", tracingSocketPath) +} + +func cgroupNamespaceSupported() bool { + 
cgroupNSOnce.Do(func() { + if _, err := os.Stat("/proc/self/ns/cgroup"); !os.IsNotExist(err) { + supportsCgroupNS = true + } + }) + return supportsCgroupNS +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go b/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go index 48b0969e39..83ee278187 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go @@ -4,12 +4,20 @@ package oci import ( + "fmt" + "path/filepath" + "github.com/containerd/containerd/oci" "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/solver/pb" + specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) +const ( + tracingSocketPath = "//./pipe/otel-grpc" +) + func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) { return nil, nil } @@ -43,3 +51,19 @@ func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) { } return nil, errors.New("no support for POSIXRlimit on Windows") } + +func getTracingSocketMount(socket string) specs.Mount { + return specs.Mount{ + Destination: filepath.FromSlash(tracingSocketPath), + Source: socket, + Options: []string{"ro"}, + } +} + +func getTracingSocket() string { + return fmt.Sprintf("npipe://%s", filepath.ToSlash(tracingSocketPath)) +} + +func cgroupNamespaceSupported() bool { + return false +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/cpu.go b/vendor/github.com/moby/buildkit/executor/resources/cpu.go new file mode 100644 index 0000000000..53d31f477f --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/cpu.go @@ -0,0 +1,141 @@ +package resources + +import ( + "bufio" + "os" + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/pkg/errors" +) + +const ( + cpuUsageUsec = "usage_usec" + cpuUserUsec = "user_usec" + cpuSystemUsec = "system_usec" + cpuNrPeriods = "nr_periods" + 
cpuNrThrottled = "nr_throttled" + cpuThrottledUsec = "throttled_usec" +) + +func getCgroupCPUStat(cgroupPath string) (*types.CPUStat, error) { + cpuStat := &types.CPUStat{} + + // Read cpu.stat file + cpuStatFile, err := os.Open(filepath.Join(cgroupPath, "cpu.stat")) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + return nil, err + } + defer cpuStatFile.Close() + + scanner := bufio.NewScanner(cpuStatFile) + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + + if len(fields) < 2 { + continue + } + + key := fields[0] + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + + switch key { + case cpuUsageUsec: + cpuStat.UsageNanos = uint64Ptr(value * 1000) + case cpuUserUsec: + cpuStat.UserNanos = uint64Ptr(value * 1000) + case cpuSystemUsec: + cpuStat.SystemNanos = uint64Ptr(value * 1000) + case cpuNrPeriods: + cpuStat.NrPeriods = new(uint32) + *cpuStat.NrPeriods = uint32(value) + case cpuNrThrottled: + cpuStat.NrThrottled = new(uint32) + *cpuStat.NrThrottled = uint32(value) + case cpuThrottledUsec: + cpuStat.ThrottledNanos = uint64Ptr(value * 1000) + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + // Read cpu.pressure file + pressure, err := parsePressureFile(filepath.Join(cgroupPath, "cpu.pressure")) + if err == nil { + cpuStat.Pressure = pressure + } + + return cpuStat, nil +} +func parsePressureFile(filename string) (*types.Pressure, error) { + content, err := os.ReadFile(filename) + if err != nil { + if errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTSUP) { // pressure file requires CONFIG_PSI + return nil, nil + } + return nil, err + } + + lines := strings.Split(string(content), "\n") + + pressure := &types.Pressure{} + for _, line := range lines { + // Skip empty lines + if len(strings.TrimSpace(line)) == 0 { + continue + } + + fields := strings.Fields(line) + prefix := fields[0] + pressureValues := &types.PressureValues{} + + for 
i := 1; i < len(fields); i++ { + keyValue := strings.Split(fields[i], "=") + key := keyValue[0] + valueStr := keyValue[1] + + if key == "total" { + totalValue, err := strconv.ParseUint(valueStr, 10, 64) + if err != nil { + return nil, err + } + pressureValues.Total = &totalValue + } else { + value, err := strconv.ParseFloat(valueStr, 64) + if err != nil { + return nil, err + } + + switch key { + case "avg10": + pressureValues.Avg10 = &value + case "avg60": + pressureValues.Avg60 = &value + case "avg300": + pressureValues.Avg300 = &value + } + } + } + + switch prefix { + case "some": + pressure.Some = pressureValues + case "full": + pressure.Full = pressureValues + } + } + + return pressure, nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/io.go b/vendor/github.com/moby/buildkit/executor/resources/io.go new file mode 100644 index 0000000000..be56d76375 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/io.go @@ -0,0 +1,117 @@ +package resources + +import ( + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/pkg/errors" +) + +const ( + ioStatFile = "io.stat" + ioPressureFile = "io.pressure" +) + +const ( + ioReadBytes = "rbytes" + ioWriteBytes = "wbytes" + ioDiscardBytes = "dbytes" + ioReadIOs = "rios" + ioWriteIOs = "wios" + ioDiscardIOs = "dios" +) + +func getCgroupIOStat(cgroupPath string) (*types.IOStat, error) { + ioStatPath := filepath.Join(cgroupPath, ioStatFile) + data, err := os.ReadFile(ioStatPath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + return nil, errors.Wrapf(err, "failed to read %s", ioStatPath) + } + + ioStat := &types.IOStat{} + lines := strings.Split(string(data), "\n") + for _, line := range lines { + parts := strings.Fields(line) + if len(parts) < 2 { + continue + } + + for _, part := range parts[1:] { + key, value := parseKeyValue(part) + if key == "" { + continue + } + + switch key { + case 
ioReadBytes: + if ioStat.ReadBytes != nil { + *ioStat.ReadBytes += value + } else { + ioStat.ReadBytes = uint64Ptr(value) + } + case ioWriteBytes: + if ioStat.WriteBytes != nil { + *ioStat.WriteBytes += value + } else { + ioStat.WriteBytes = uint64Ptr(value) + } + case ioDiscardBytes: + if ioStat.DiscardBytes != nil { + *ioStat.DiscardBytes += value + } else { + ioStat.DiscardBytes = uint64Ptr(value) + } + case ioReadIOs: + if ioStat.ReadIOs != nil { + *ioStat.ReadIOs += value + } else { + ioStat.ReadIOs = uint64Ptr(value) + } + case ioWriteIOs: + if ioStat.WriteIOs != nil { + *ioStat.WriteIOs += value + } else { + ioStat.WriteIOs = uint64Ptr(value) + } + case ioDiscardIOs: + if ioStat.DiscardIOs != nil { + *ioStat.DiscardIOs += value + } else { + ioStat.DiscardIOs = uint64Ptr(value) + } + } + } + } + + // Parse the pressure + pressure, err := parsePressureFile(filepath.Join(cgroupPath, ioPressureFile)) + if err != nil { + return nil, err + } + ioStat.Pressure = pressure + + return ioStat, nil +} + +func parseKeyValue(kv string) (key string, value uint64) { + parts := strings.SplitN(kv, "=", 2) + if len(parts) != 2 { + return "", 0 + } + key = parts[0] + value, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return "", 0 + } + return key, value +} + +func uint64Ptr(v uint64) *uint64 { + return &v +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/memory.go b/vendor/github.com/moby/buildkit/executor/resources/memory.go new file mode 100644 index 0000000000..775f0f8dae --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/memory.go @@ -0,0 +1,159 @@ +package resources + +import ( + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/pkg/errors" +) + +const ( + memoryStatFile = "memory.stat" + memoryPressureFile = "memory.pressure" + memoryPeakFile = "memory.peak" + memorySwapCurrentFile = "memory.swap.current" + memoryEventsFile = "memory.events" +) + 
+const ( + memoryAnon = "anon" + memoryFile = "file" + memoryKernelStack = "kernel_stack" + memoryPageTables = "pagetables" + memorySock = "sock" + memoryShmem = "shmem" + memoryFileMapped = "file_mapped" + memoryFileDirty = "file_dirty" + memoryFileWriteback = "file_writeback" + memorySlab = "slab" + memoryPgscan = "pgscan" + memoryPgsteal = "pgsteal" + memoryPgfault = "pgfault" + memoryPgmajfault = "pgmajfault" + + memoryLow = "low" + memoryHigh = "high" + memoryMax = "max" + memoryOom = "oom" + memoryOomKill = "oom_kill" +) + +func getCgroupMemoryStat(path string) (*types.MemoryStat, error) { + memoryStat := &types.MemoryStat{} + + // Parse memory.stat + err := parseKeyValueFile(filepath.Join(path, memoryStatFile), func(key string, value uint64) { + switch key { + case memoryAnon: + memoryStat.Anon = &value + case memoryFile: + memoryStat.File = &value + case memoryKernelStack: + memoryStat.KernelStack = &value + case memoryPageTables: + memoryStat.PageTables = &value + case memorySock: + memoryStat.Sock = &value + case memoryShmem: + memoryStat.Shmem = &value + case memoryFileMapped: + memoryStat.FileMapped = &value + case memoryFileDirty: + memoryStat.FileDirty = &value + case memoryFileWriteback: + memoryStat.FileWriteback = &value + case memorySlab: + memoryStat.Slab = &value + case memoryPgscan: + memoryStat.Pgscan = &value + case memoryPgsteal: + memoryStat.Pgsteal = &value + case memoryPgfault: + memoryStat.Pgfault = &value + case memoryPgmajfault: + memoryStat.Pgmajfault = &value + } + }) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + return nil, err + } + + pressure, err := parsePressureFile(filepath.Join(path, memoryPressureFile)) + if err != nil { + return nil, err + } + if pressure != nil { + memoryStat.Pressure = pressure + } + + err = parseKeyValueFile(filepath.Join(path, memoryEventsFile), func(key string, value uint64) { + switch key { + case memoryLow: + memoryStat.LowEvents = value + case memoryHigh: + 
memoryStat.HighEvents = value + case memoryMax: + memoryStat.MaxEvents = value + case memoryOom: + memoryStat.OomEvents = value + case memoryOomKill: + memoryStat.OomKillEvents = value + } + }) + + if err != nil { + return nil, err + } + + peak, err := parseSingleValueFile(filepath.Join(path, memoryPeakFile)) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return nil, err + } + } else { + memoryStat.Peak = &peak + } + + swap, err := parseSingleValueFile(filepath.Join(path, memorySwapCurrentFile)) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return nil, err + } + } else { + memoryStat.SwapBytes = &swap + } + + return memoryStat, nil +} + +func parseKeyValueFile(filePath string, callback func(key string, value uint64)) error { + content, err := os.ReadFile(filePath) + if err != nil { + return errors.Wrapf(err, "failed to read %s", filePath) + } + + lines := strings.Split(string(content), "\n") + for _, line := range lines { + if len(strings.TrimSpace(line)) == 0 { + continue + } + + fields := strings.Fields(line) + key := fields[0] + valueStr := fields[1] + value, err := strconv.ParseUint(valueStr, 10, 64) + if err != nil { + return errors.Wrapf(err, "failed to parse value for %s", key) + } + + callback(key, value) + } + + return nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/monitor.go b/vendor/github.com/moby/buildkit/executor/resources/monitor.go new file mode 100644 index 0000000000..95b954bcbe --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/monitor.go @@ -0,0 +1,287 @@ +package resources + +import ( + "bufio" + "context" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/moby/buildkit/util/network" + "github.com/prometheus/procfs" + "github.com/sirupsen/logrus" +) + +const ( + cgroupProcsFile = "cgroup.procs" + cgroupControllersFile = "cgroup.controllers" + cgroupSubtreeFile = "cgroup.subtree_control" + 
defaultMountpoint = "/sys/fs/cgroup" + initGroup = "init" +) + +var initOnce sync.Once +var isCgroupV2 bool + +type cgroupRecord struct { + once sync.Once + ns string + sampler *Sub[*types.Sample] + closeSampler func() error + samples []*types.Sample + err error + done chan struct{} + monitor *Monitor + netSampler NetworkSampler + startCPUStat *procfs.CPUStat + sysCPUStat *types.SysCPUStat +} + +func (r *cgroupRecord) Wait() error { + go r.close() + <-r.done + return r.err +} + +func (r *cgroupRecord) Start() { + if stat, err := r.monitor.proc.Stat(); err == nil { + r.startCPUStat = &stat.CPUTotal + } + s := NewSampler(2*time.Second, 10, r.sample) + r.sampler = s.Record() + r.closeSampler = s.Close +} + +func (r *cgroupRecord) Close() { + r.close() +} + +func (r *cgroupRecord) CloseAsync(next func(context.Context) error) error { + go func() { + r.close() + next(context.TODO()) + }() + return nil +} + +func (r *cgroupRecord) close() { + r.once.Do(func() { + defer close(r.done) + go func() { + r.monitor.mu.Lock() + delete(r.monitor.records, r.ns) + r.monitor.mu.Unlock() + }() + if r.sampler == nil { + return + } + s, err := r.sampler.Close(true) + if err != nil { + r.err = err + } else { + r.samples = s + } + r.closeSampler() + + if r.startCPUStat != nil { + stat, err := r.monitor.proc.Stat() + if err == nil { + cpu := &types.SysCPUStat{ + User: stat.CPUTotal.User - r.startCPUStat.User, + Nice: stat.CPUTotal.Nice - r.startCPUStat.Nice, + System: stat.CPUTotal.System - r.startCPUStat.System, + Idle: stat.CPUTotal.Idle - r.startCPUStat.Idle, + Iowait: stat.CPUTotal.Iowait - r.startCPUStat.Iowait, + IRQ: stat.CPUTotal.IRQ - r.startCPUStat.IRQ, + SoftIRQ: stat.CPUTotal.SoftIRQ - r.startCPUStat.SoftIRQ, + Steal: stat.CPUTotal.Steal - r.startCPUStat.Steal, + Guest: stat.CPUTotal.Guest - r.startCPUStat.Guest, + GuestNice: stat.CPUTotal.GuestNice - r.startCPUStat.GuestNice, + } + r.sysCPUStat = cpu + } + } + }) +} + +func (r *cgroupRecord) sample(tm time.Time) 
(*types.Sample, error) { + cpu, err := getCgroupCPUStat(filepath.Join(defaultMountpoint, r.ns)) + if err != nil { + return nil, err + } + memory, err := getCgroupMemoryStat(filepath.Join(defaultMountpoint, r.ns)) + if err != nil { + return nil, err + } + io, err := getCgroupIOStat(filepath.Join(defaultMountpoint, r.ns)) + if err != nil { + return nil, err + } + pids, err := getCgroupPIDsStat(filepath.Join(defaultMountpoint, r.ns)) + if err != nil { + return nil, err + } + sample := &types.Sample{ + Timestamp_: tm, + CPUStat: cpu, + MemoryStat: memory, + IOStat: io, + PIDsStat: pids, + } + if r.netSampler != nil { + net, err := r.netSampler.Sample() + if err != nil { + return nil, err + } + sample.NetStat = net + } + return sample, nil +} + +func (r *cgroupRecord) Samples() (*types.Samples, error) { + <-r.done + if r.err != nil { + return nil, r.err + } + return &types.Samples{ + Samples: r.samples, + SysCPUStat: r.sysCPUStat, + }, nil +} + +type nopRecord struct { +} + +func (r *nopRecord) Wait() error { + return nil +} + +func (r *nopRecord) Samples() (*types.Samples, error) { + return nil, nil +} + +func (r *nopRecord) Close() { +} + +func (r *nopRecord) CloseAsync(next func(context.Context) error) error { + return next(context.TODO()) +} + +func (r *nopRecord) Start() { +} + +type Monitor struct { + mu sync.Mutex + closed chan struct{} + records map[string]*cgroupRecord + proc procfs.FS +} + +type NetworkSampler interface { + Sample() (*network.Sample, error) +} + +type RecordOpt struct { + NetworkSampler NetworkSampler +} + +func (m *Monitor) RecordNamespace(ns string, opt RecordOpt) (types.Recorder, error) { + isClosed := false + select { + case <-m.closed: + isClosed = true + default: + } + if !isCgroupV2 || isClosed { + return &nopRecord{}, nil + } + r := &cgroupRecord{ + ns: ns, + done: make(chan struct{}), + monitor: m, + netSampler: opt.NetworkSampler, + } + m.mu.Lock() + m.records[ns] = r + m.mu.Unlock() + return r, nil +} + +func (m *Monitor) Close() 
error { + close(m.closed) + m.mu.Lock() + defer m.mu.Unlock() + + for _, r := range m.records { + r.close() + } + return nil +} + +func NewMonitor() (*Monitor, error) { + initOnce.Do(func() { + isCgroupV2 = isCgroup2() + if !isCgroupV2 { + return + } + if err := prepareCgroupControllers(); err != nil { + logrus.Warnf("failed to prepare cgroup controllers: %+v", err) + } + }) + + fs, err := procfs.NewDefaultFS() + if err != nil { + return nil, err + } + + return &Monitor{ + closed: make(chan struct{}), + records: make(map[string]*cgroupRecord), + proc: fs, + }, nil +} + +func prepareCgroupControllers() error { + v, ok := os.LookupEnv("BUILDKIT_SETUP_CGROUPV2_ROOT") + if !ok { + return nil + } + if b, _ := strconv.ParseBool(v); !b { + return nil + } + // move current process to init cgroup + if err := os.MkdirAll(filepath.Join(defaultMountpoint, initGroup), 0755); err != nil { + return err + } + f, err := os.OpenFile(filepath.Join(defaultMountpoint, cgroupProcsFile), os.O_RDONLY, 0) + if err != nil { + return err + } + s := bufio.NewScanner(f) + for s.Scan() { + if err := os.WriteFile(filepath.Join(defaultMountpoint, initGroup, cgroupProcsFile), s.Bytes(), 0); err != nil { + return err + } + } + if err := f.Close(); err != nil { + return err + } + dt, err := os.ReadFile(filepath.Join(defaultMountpoint, cgroupControllersFile)) + if err != nil { + return err + } + for _, c := range strings.Split(string(dt), " ") { + if c == "" { + continue + } + if err := os.WriteFile(filepath.Join(defaultMountpoint, cgroupSubtreeFile), []byte("+"+c), 0); err != nil { + // ignore error + logrus.Warnf("failed to enable cgroup controller %q: %+v", c, err) + } + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/monitor_linux.go b/vendor/github.com/moby/buildkit/executor/resources/monitor_linux.go new file mode 100644 index 0000000000..aefc2adce7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/monitor_linux.go @@ -0,0 +1,15 @@ 
+//go:build linux +// +build linux + +package resources + +import "golang.org/x/sys/unix" + +func isCgroup2() bool { + var st unix.Statfs_t + err := unix.Statfs(defaultMountpoint, &st) + if err != nil { + return false + } + return st.Type == unix.CGROUP2_SUPER_MAGIC +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/monitor_nolinux.go b/vendor/github.com/moby/buildkit/executor/resources/monitor_nolinux.go new file mode 100644 index 0000000000..20a50a648c --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/monitor_nolinux.go @@ -0,0 +1,8 @@ +//go:build !linux +// +build !linux + +package resources + +func isCgroup2() bool { + return false +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/pids.go b/vendor/github.com/moby/buildkit/executor/resources/pids.go new file mode 100644 index 0000000000..88493d805e --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/pids.go @@ -0,0 +1,45 @@ +package resources + +import ( + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/pkg/errors" +) + +const ( + pidsCurrentFile = "pids.current" +) + +func getCgroupPIDsStat(path string) (*types.PIDsStat, error) { + pidsStat := &types.PIDsStat{} + + v, err := parseSingleValueFile(filepath.Join(path, pidsCurrentFile)) + if err != nil { + if !errors.Is(err, os.ErrNotExist) { + return nil, err + } + } else { + pidsStat.Current = &v + } + + return pidsStat, nil +} + +func parseSingleValueFile(filePath string) (uint64, error) { + content, err := os.ReadFile(filePath) + if err != nil { + return 0, errors.Wrapf(err, "failed to read %s", filePath) + } + + valueStr := strings.TrimSpace(string(content)) + value, err := strconv.ParseUint(valueStr, 10, 64) + if err != nil { + return 0, errors.Wrapf(err, "failed to parse value: %s", valueStr) + } + + return value, nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/sampler.go 
b/vendor/github.com/moby/buildkit/executor/resources/sampler.go new file mode 100644 index 0000000000..38e94812da --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/sampler.go @@ -0,0 +1,139 @@ +package resources + +import ( + "sync" + "time" +) + +type WithTimestamp interface { + Timestamp() time.Time +} + +type Sampler[T WithTimestamp] struct { + mu sync.Mutex + minInterval time.Duration + maxSamples int + callback func(ts time.Time) (T, error) + doneOnce sync.Once + done chan struct{} + running bool + subs map[*Sub[T]]struct{} +} + +type Sub[T WithTimestamp] struct { + sampler *Sampler[T] + interval time.Duration + first time.Time + last time.Time + samples []T + err error +} + +func (s *Sub[T]) Close(captureLast bool) ([]T, error) { + s.sampler.mu.Lock() + delete(s.sampler.subs, s) + + if s.err != nil { + s.sampler.mu.Unlock() + return nil, s.err + } + current := s.first + out := make([]T, 0, len(s.samples)+1) + for i, v := range s.samples { + ts := v.Timestamp() + if i == 0 || ts.Sub(current) >= s.interval { + out = append(out, v) + current = ts + } + } + s.sampler.mu.Unlock() + + if captureLast { + v, err := s.sampler.callback(time.Now()) + if err != nil { + return nil, err + } + out = append(out, v) + } + + return out, nil +} + +func NewSampler[T WithTimestamp](minInterval time.Duration, maxSamples int, cb func(time.Time) (T, error)) *Sampler[T] { + s := &Sampler[T]{ + minInterval: minInterval, + maxSamples: maxSamples, + callback: cb, + done: make(chan struct{}), + subs: make(map[*Sub[T]]struct{}), + } + return s +} + +func (s *Sampler[T]) Record() *Sub[T] { + ss := &Sub[T]{ + interval: s.minInterval, + first: time.Now(), + sampler: s, + } + s.mu.Lock() + s.subs[ss] = struct{}{} + if !s.running { + s.running = true + go s.run() + } + s.mu.Unlock() + return ss +} + +func (s *Sampler[T]) run() { + ticker := time.NewTimer(s.minInterval) + for { + select { + case <-s.done: + ticker.Stop() + return + case <-ticker.C: + tm := time.Now() + 
s.mu.Lock() + active := make([]*Sub[T], 0, len(s.subs)) + for ss := range s.subs { + if tm.Sub(ss.last) < ss.interval { + continue + } + ss.last = tm + active = append(active, ss) + } + s.mu.Unlock() + ticker = time.NewTimer(s.minInterval) + if len(active) == 0 { + continue + } + value, err := s.callback(tm) + s.mu.Lock() + for _, ss := range active { + if _, found := s.subs[ss]; !found { + continue // skip if Close() was called while the lock was released + } + if err != nil { + ss.err = err + } else { + ss.samples = append(ss.samples, value) + ss.err = nil + } + dur := ss.last.Sub(ss.first) + if time.Duration(ss.interval)*time.Duration(s.maxSamples) <= dur { + ss.interval *= 2 + } + } + s.mu.Unlock() + } + } +} + +func (s *Sampler[T]) Close() error { + s.doneOnce.Do(func() { + close(s.done) + }) + return nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/sys.go b/vendor/github.com/moby/buildkit/executor/resources/sys.go new file mode 100644 index 0000000000..7082517adc --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/sys.go @@ -0,0 +1,9 @@ +package resources + +import "github.com/moby/buildkit/executor/resources/types" + +type SysSampler = Sub[*types.SysSample] + +func NewSysSampler() (*Sampler[*types.SysSample], error) { + return newSysSampler() +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/sys_linux.go b/vendor/github.com/moby/buildkit/executor/resources/sys_linux.go new file mode 100644 index 0000000000..d7835137ba --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/sys_linux.go @@ -0,0 +1,93 @@ +package resources + +import ( + "os" + "time" + + "github.com/moby/buildkit/executor/resources/types" + "github.com/prometheus/procfs" +) + +func newSysSampler() (*Sampler[*types.SysSample], error) { + pfs, err := procfs.NewDefaultFS() + if err != nil { + return nil, err + } + + return NewSampler(2*time.Second, 20, func(tm time.Time) (*types.SysSample, error) { + return sampleSys(pfs, 
tm) + }), nil +} + +func sampleSys(proc procfs.FS, tm time.Time) (*types.SysSample, error) { + stat, err := proc.Stat() + if err != nil { + return nil, err + } + + s := &types.SysSample{ + Timestamp_: tm, + } + + s.CPUStat = &types.SysCPUStat{ + User: stat.CPUTotal.User, + Nice: stat.CPUTotal.Nice, + System: stat.CPUTotal.System, + Idle: stat.CPUTotal.Idle, + Iowait: stat.CPUTotal.Iowait, + IRQ: stat.CPUTotal.IRQ, + SoftIRQ: stat.CPUTotal.SoftIRQ, + Steal: stat.CPUTotal.Steal, + Guest: stat.CPUTotal.Guest, + GuestNice: stat.CPUTotal.GuestNice, + } + + s.ProcStat = &types.ProcStat{ + ContextSwitches: stat.ContextSwitches, + ProcessCreated: stat.ProcessCreated, + ProcessesRunning: stat.ProcessesRunning, + } + + mem, err := proc.Meminfo() + if err != nil { + return nil, err + } + + s.MemoryStat = &types.SysMemoryStat{ + Total: mem.MemTotal, + Free: mem.MemFree, + Buffers: mem.Buffers, + Cached: mem.Cached, + Active: mem.Active, + Inactive: mem.Inactive, + Swap: mem.SwapTotal, + Available: mem.MemAvailable, + Dirty: mem.Dirty, + Writeback: mem.Writeback, + Slab: mem.Slab, + } + + if _, err := os.Lstat("/proc/pressure"); err != nil { + return s, nil + } + + cp, err := parsePressureFile("/proc/pressure/cpu") + if err != nil { + return nil, err + } + s.CPUPressure = cp + + mp, err := parsePressureFile("/proc/pressure/memory") + if err != nil { + return nil, err + } + s.MemoryPressure = mp + + ip, err := parsePressureFile("/proc/pressure/io") + if err != nil { + return nil, err + } + s.IOPressure = ip + + return s, nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/sys_nolinux.go b/vendor/github.com/moby/buildkit/executor/resources/sys_nolinux.go new file mode 100644 index 0000000000..dd0da8582e --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/sys_nolinux.go @@ -0,0 +1,9 @@ +//go:build !linux + +package resources + +import "github.com/moby/buildkit/executor/resources/types" + +func newSysSampler() (*Sampler[*types.SysSample], 
error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/types/systypes.go b/vendor/github.com/moby/buildkit/executor/resources/types/systypes.go new file mode 100644 index 0000000000..56db46945b --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/types/systypes.go @@ -0,0 +1,72 @@ +package types + +import ( + "encoding/json" + "math" + "time" +) + +type SysCPUStat struct { + User float64 `json:"user"` + Nice float64 `json:"nice"` + System float64 `json:"system"` + Idle float64 `json:"idle"` + Iowait float64 `json:"iowait"` + IRQ float64 `json:"irq"` + SoftIRQ float64 `json:"softirq"` + Steal float64 `json:"steal"` + Guest float64 `json:"guest"` + GuestNice float64 `json:"guestNice"` +} + +type sysCPUStatAlias SysCPUStat // avoid recursion of MarshalJSON + +func (s SysCPUStat) MarshalJSON() ([]byte, error) { + return json.Marshal(sysCPUStatAlias{ + User: math.Round(s.User*1000) / 1000, + Nice: math.Round(s.Nice*1000) / 1000, + System: math.Round(s.System*1000) / 1000, + Idle: math.Round(s.Idle*1000) / 1000, + Iowait: math.Round(s.Iowait*1000) / 1000, + IRQ: math.Round(s.IRQ*1000) / 1000, + SoftIRQ: math.Round(s.SoftIRQ*1000) / 1000, + Steal: math.Round(s.Steal*1000) / 1000, + Guest: math.Round(s.Guest*1000) / 1000, + GuestNice: math.Round(s.GuestNice*1000) / 1000, + }) +} + +type ProcStat struct { + ContextSwitches uint64 `json:"contextSwitches"` + ProcessCreated uint64 `json:"processCreated"` + ProcessesRunning uint64 `json:"processesRunning"` +} + +type SysMemoryStat struct { + Total *uint64 `json:"total"` + Free *uint64 `json:"free"` + Available *uint64 `json:"available"` + Buffers *uint64 `json:"buffers"` + Cached *uint64 `json:"cached"` + Active *uint64 `json:"active"` + Inactive *uint64 `json:"inactive"` + Swap *uint64 `json:"swap"` + Dirty *uint64 `json:"dirty"` + Writeback *uint64 `json:"writeback"` + Slab *uint64 `json:"slab"` +} + +type SysSample struct { + //nolint + Timestamp_ time.Time 
`json:"timestamp"` + CPUStat *SysCPUStat `json:"cpuStat,omitempty"` + ProcStat *ProcStat `json:"procStat,omitempty"` + MemoryStat *SysMemoryStat `json:"memoryStat,omitempty"` + CPUPressure *Pressure `json:"cpuPressure,omitempty"` + MemoryPressure *Pressure `json:"memoryPressure,omitempty"` + IOPressure *Pressure `json:"ioPressure,omitempty"` +} + +func (s *SysSample) Timestamp() time.Time { + return s.Timestamp_ +} diff --git a/vendor/github.com/moby/buildkit/executor/resources/types/types.go b/vendor/github.com/moby/buildkit/executor/resources/types/types.go new file mode 100644 index 0000000000..9bac557e21 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/resources/types/types.go @@ -0,0 +1,104 @@ +package types + +import ( + "context" + "time" + + "github.com/moby/buildkit/util/network" +) + +type Recorder interface { + Start() + Close() + CloseAsync(func(context.Context) error) error + Wait() error + Samples() (*Samples, error) +} + +type Samples struct { + Samples []*Sample `json:"samples,omitempty"` + SysCPUStat *SysCPUStat `json:"sysCPUStat,omitempty"` +} + +// Sample represents a wrapper for sampled data of cgroupv2 controllers +type Sample struct { + //nolint + Timestamp_ time.Time `json:"timestamp"` + CPUStat *CPUStat `json:"cpuStat,omitempty"` + MemoryStat *MemoryStat `json:"memoryStat,omitempty"` + IOStat *IOStat `json:"ioStat,omitempty"` + PIDsStat *PIDsStat `json:"pidsStat,omitempty"` + NetStat *network.Sample `json:"netStat,omitempty"` +} + +func (s *Sample) Timestamp() time.Time { + return s.Timestamp_ +} + +// CPUStat represents the sampling state of the cgroupv2 CPU controller +type CPUStat struct { + UsageNanos *uint64 `json:"usageNanos,omitempty"` + UserNanos *uint64 `json:"userNanos,omitempty"` + SystemNanos *uint64 `json:"systemNanos,omitempty"` + NrPeriods *uint32 `json:"nrPeriods,omitempty"` + NrThrottled *uint32 `json:"nrThrottled,omitempty"` + ThrottledNanos *uint64 `json:"throttledNanos,omitempty"` + Pressure *Pressure 
`json:"pressure,omitempty"` +} + +// MemoryStat represents the sampling state of the cgroupv2 memory controller +type MemoryStat struct { + SwapBytes *uint64 `json:"swapBytes,omitempty"` + Anon *uint64 `json:"anon,omitempty"` + File *uint64 `json:"file,omitempty"` + Kernel *uint64 `json:"kernel,omitempty"` + KernelStack *uint64 `json:"kernelStack,omitempty"` + PageTables *uint64 `json:"pageTables,omitempty"` + Sock *uint64 `json:"sock,omitempty"` + Vmalloc *uint64 `json:"vmalloc,omitempty"` + Shmem *uint64 `json:"shmem,omitempty"` + FileMapped *uint64 `json:"fileMapped,omitempty"` + FileDirty *uint64 `json:"fileDirty,omitempty"` + FileWriteback *uint64 `json:"fileWriteback,omitempty"` + Slab *uint64 `json:"slab,omitempty"` + Pgscan *uint64 `json:"pgscan,omitempty"` + Pgsteal *uint64 `json:"pgsteal,omitempty"` + Pgfault *uint64 `json:"pgfault,omitempty"` + Pgmajfault *uint64 `json:"pgmajfault,omitempty"` + Peak *uint64 `json:"peak,omitempty"` + LowEvents uint64 `json:"lowEvents,omitempty"` + HighEvents uint64 `json:"highEvents,omitempty"` + MaxEvents uint64 `json:"maxEvents,omitempty"` + OomEvents uint64 `json:"oomEvents,omitempty"` + OomKillEvents uint64 `json:"oomKillEvents,omitempty"` + Pressure *Pressure `json:"pressure,omitempty"` +} + +// IOStat represents the sampling state of the cgroupv2 IO controller +type IOStat struct { + ReadBytes *uint64 `json:"readBytes,omitempty"` + WriteBytes *uint64 `json:"writeBytes,omitempty"` + DiscardBytes *uint64 `json:"discardBytes,omitempty"` + ReadIOs *uint64 `json:"readIOs,omitempty"` + WriteIOs *uint64 `json:"writeIOs,omitempty"` + DiscardIOs *uint64 `json:"discardIOs,omitempty"` + Pressure *Pressure `json:"pressure,omitempty"` +} + +// PIDsStat represents the sampling state of the cgroupv2 PIDs controller +type PIDsStat struct { + Current *uint64 `json:"current,omitempty"` +} + +// Pressure represents the sampling state of pressure files +type Pressure struct { + Some *PressureValues `json:"some"` + Full *PressureValues 
`json:"full"` +} + +type PressureValues struct { + Avg10 *float64 `json:"avg10"` + Avg60 *float64 `json:"avg60"` + Avg300 *float64 `json:"avg300"` + Total *uint64 `json:"total"` +} diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go index 213ebb7366..e804ee850b 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go @@ -7,6 +7,7 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "sync" "syscall" "time" @@ -22,6 +23,8 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" + "github.com/moby/buildkit/executor/resources" + resourcestypes "github.com/moby/buildkit/executor/resources/types" gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" @@ -50,6 +53,7 @@ type Opt struct { ApparmorProfile string SELinux bool TracingSocket string + ResourceMonitor *resources.Monitor } var defaultCommandCandidates = []string{"buildkit-runc", "runc"} @@ -70,6 +74,7 @@ type runcExecutor struct { apparmorProfile string selinux bool tracingSocket string + resmon *resources.Monitor } func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) { @@ -92,7 +97,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex root := opt.Root - if err := os.MkdirAll(root, 0711); err != nil { + if err := os.MkdirAll(root, 0o711); err != nil { return nil, errors.Wrapf(err, "failed to create %s", root) } @@ -135,11 +140,12 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex apparmorProfile: opt.ApparmorProfile, selinux: opt.SELinux, tracingSocket: opt.TracingSocket, + resmon: opt.ResourceMonitor, } return w, nil } -func (w *runcExecutor) Run(ctx context.Context, id string, 
root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { +func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (rec resourcestypes.Recorder, err error) { meta := process.Meta startedOnce := sync.Once{} @@ -162,13 +168,18 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, provider, ok := w.networkProviders[meta.NetMode] if !ok { - return errors.Errorf("unknown network mode %s", meta.NetMode) + return nil, errors.Errorf("unknown network mode %s", meta.NetMode) } namespace, err := provider.New(ctx, meta.Hostname) if err != nil { - return err + return nil, err } - defer namespace.Close() + doReleaseNetwork := true + defer func() { + if doReleaseNetwork { + namespace.Close() + } + }() if meta.NetMode == pb.NetMode_HOST { bklog.G(ctx).Info("enabling HostNetworking") @@ -176,12 +187,12 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns) if err != nil { - return err + return nil, err } hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, w.idmap, meta.Hostname) if err != nil { - return err + return nil, err } if clean != nil { defer clean() @@ -189,12 +200,12 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mountable, err := root.Src.Mount(ctx, false) if err != nil { - return err + return nil, err } rootMount, release, err := mountable.Mount() if err != nil { - return err + return nil, err } if release != nil { defer release() @@ -205,8 +216,8 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, } bundle := filepath.Join(w.root, id) - if err := os.Mkdir(bundle, 0711); err != nil { - return err + if err := os.Mkdir(bundle, 0o711); err != nil { + return nil, err } defer os.RemoveAll(bundle) @@ -216,24 
+227,24 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, } rootFSPath := filepath.Join(bundle, "rootfs") - if err := idtools.MkdirAllAndChown(rootFSPath, 0700, identity); err != nil { - return err + if err := idtools.MkdirAllAndChown(rootFSPath, 0o700, identity); err != nil { + return nil, err } if err := mount.All(rootMount, rootFSPath); err != nil { - return err + return nil, err } defer mount.Unmount(rootFSPath, 0) - defer executor.MountStubsCleaner(rootFSPath, mounts, meta.RemoveMountStubsRecursive)() + defer executor.MountStubsCleaner(ctx, rootFSPath, mounts, meta.RemoveMountStubsRecursive)() uid, gid, sgids, err := oci.GetUser(rootFSPath, meta.User) if err != nil { - return err + return nil, err } f, err := os.Create(filepath.Join(bundle, "config.json")) if err != nil { - return err + return nil, err } defer f.Close() @@ -250,13 +261,13 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, if w.idmap != nil { identity, err = w.idmap.ToHost(identity) if err != nil { - return err + return nil, err } } spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, w.processMode, w.idmap, w.apparmorProfile, w.selinux, w.tracingSocket, opts...) 
if err != nil { - return err + return nil, err } defer cleanup() @@ -267,11 +278,11 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, newp, err := fs.RootPath(rootFSPath, meta.Cwd) if err != nil { - return errors.Wrapf(err, "working dir %s points to invalid target", newp) + return nil, errors.Wrapf(err, "working dir %s points to invalid target", newp) } if _, err := os.Stat(newp); err != nil { - if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil { - return errors.Wrapf(err, "failed to create working directory %s", newp) + if err := idtools.MkdirAllAndChown(newp, 0o755, identity); err != nil { + return nil, errors.Wrapf(err, "failed to create working directory %s", newp) } } @@ -279,59 +290,63 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, spec.Process.OOMScoreAdj = w.oomScoreAdj if w.rootless { if err := rootlessspecconv.ToRootless(spec); err != nil { - return err + return nil, err } } if err := json.NewEncoder(f).Encode(spec); err != nil { - return err + return nil, err } - // runCtx/killCtx is used for extra check in case the kill command blocks - runCtx, cancelRun := context.WithCancel(context.Background()) - defer cancelRun() - - ended := make(chan struct{}) - go func() { - for { - select { - case <-ctx.Done(): - killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second) - if err := w.runc.Kill(killCtx, id, int(syscall.SIGKILL), nil); err != nil { - bklog.G(ctx).Errorf("failed to kill runc %s: %+v", id, err) - select { - case <-killCtx.Done(): - timeout() - cancelRun() - return - default: - } - } - timeout() - select { - case <-time.After(50 * time.Millisecond): - case <-ended: - return - } - case <-ended: - return - } - } - }() - bklog.G(ctx).Debugf("> creating %s %v", id, meta.Args) + cgroupPath := spec.Linux.CgroupsPath + if cgroupPath != "" { + rec, err = w.resmon.RecordNamespace(cgroupPath, resources.RecordOpt{ + NetworkSampler: namespace, + }) + if err 
!= nil { + return nil, err + } + } + trace.SpanFromContext(ctx).AddEvent("Container created") - err = w.run(runCtx, id, bundle, process, func() { + err = w.run(ctx, id, bundle, process, func() { startedOnce.Do(func() { trace.SpanFromContext(ctx).AddEvent("Container started") if started != nil { close(started) } + if rec != nil { + rec.Start() + } }) - }) - close(ended) - return exitError(ctx, err) + }, true) + + releaseContainer := func(ctx context.Context) error { + err := w.runc.Delete(ctx, id, &runc.DeleteOpts{}) + err1 := namespace.Close() + if err == nil { + err = err1 + } + return err + } + doReleaseNetwork = false + + err = exitError(ctx, err) + if err != nil { + if rec != nil { + rec.Close() + } + releaseContainer(context.TODO()) + return nil, err + } + + if rec == nil { + return nil, releaseContainer(context.TODO()) + } + + return rec, rec.CloseAsync(releaseContainer) } func exitError(ctx context.Context, err error) error { @@ -341,7 +356,7 @@ func exitError(ctx context.Context, err error) error { Err: err, } var runcExitError *runc.ExitError - if errors.As(err, &runcExitError) { + if errors.As(err, &runcExitError) && runcExitError.Status >= 0 { exitErr = &gatewayapi.ExitError{ ExitCode: uint32(runcExitError.Status), } @@ -462,23 +477,190 @@ func (s *forwardIO) Stderr() io.ReadCloser { return nil } -// startingProcess is to track the os process so we can send signals to it. -type startingProcess struct { - Process *os.Process - ready chan struct{} +// newRuncProcKiller returns an abstraction for sending SIGKILL to the +// process inside the container initiated from `runc run`. +func newRunProcKiller(runC *runc.Runc, id string) procKiller { + return procKiller{runC: runC, id: id} } -// Release will free resources with a startingProcess. -func (p *startingProcess) Release() { - if p.Process != nil { - p.Process.Release() +// newExecProcKiller returns an abstraction for sending SIGKILL to the +// process inside the container initiated from `runc exec`. 
+func newExecProcKiller(runC *runc.Runc, id string) (procKiller, error) { + // for `runc exec` we need to create a pidfile and read it later to kill + // the process + tdir, err := os.MkdirTemp("", "runc") + if err != nil { + return procKiller{}, errors.Wrap(err, "failed to create directory for runc pidfile") + } + + return procKiller{ + runC: runC, + id: id, + pidfile: filepath.Join(tdir, "pidfile"), + cleanup: func() { + os.RemoveAll(tdir) + }, + }, nil +} + +type procKiller struct { + runC *runc.Runc + id string + pidfile string + cleanup func() +} + +// Cleanup will delete any tmp files created for the pidfile allocation +// if this killer was for a `runc exec` process. +func (k procKiller) Cleanup() { + if k.cleanup != nil { + k.cleanup() } } -// WaitForReady will wait until the Process has been populated or the -// provided context was cancelled. This should be called before using -// the Process field. -func (p *startingProcess) WaitForReady(ctx context.Context) error { +// Kill will send SIGKILL to the process running inside the container. +// If the process was created by `runc run` then we will use `runc kill`, +// otherwise for `runc exec` we will read the pid from a pidfile and then +// send the signal directly that process. +func (k procKiller) Kill(ctx context.Context) (err error) { + bklog.G(ctx).Debugf("sending sigkill to process in container %s", k.id) + defer func() { + if err != nil { + bklog.G(ctx).Errorf("failed to kill process in container id %s: %+v", k.id, err) + } + }() + + // this timeout is generally a no-op, the Kill ctx should already have a + // shorter timeout but here as a fail-safe for future refactoring. 
+ ctx, timeout := context.WithTimeout(ctx, 10*time.Second) + defer timeout() + + if k.pidfile == "" { + // for `runc run` process we use `runc kill` to terminate the process + return k.runC.Kill(ctx, k.id, int(syscall.SIGKILL), nil) + } + + // `runc exec` will write the pidfile a few milliseconds after we + // get the runc pid via the startedCh, so we might need to retry until + // it appears in the edge case where we want to kill a process + // immediately after it was created. + var pidData []byte + for { + pidData, err = os.ReadFile(k.pidfile) + if err != nil { + if os.IsNotExist(err) { + select { + case <-ctx.Done(): + return errors.New("context cancelled before runc wrote pidfile") + case <-time.After(10 * time.Millisecond): + continue + } + } + return errors.Wrap(err, "failed to read pidfile from runc") + } + break + } + pid, err := strconv.Atoi(string(pidData)) + if err != nil { + return errors.Wrap(err, "read invalid pid from pidfile") + } + process, err := os.FindProcess(pid) + if err != nil { + // error only possible on non-unix hosts + return errors.Wrapf(err, "failed to find process for pid %d from pidfile", pid) + } + defer process.Release() + return process.Signal(syscall.SIGKILL) +} + +// procHandle is to track the process so we can send signals to it +// and handle graceful shutdown. +type procHandle struct { + // this is for the runc process (not the process in-container) + monitorProcess *os.Process + ready chan struct{} + ended chan struct{} + shutdown func() + // this this only used when the request context is canceled and we need + // to kill the in-container process. + killer procKiller +} + +// runcProcessHandle will create a procHandle that will be monitored, where +// on ctx.Done the in-container process will receive a SIGKILL. The returned +// context should be used for the go-runc.(Run|Exec) invocations. 
The returned +// context will only be canceled in the case where the request context is +// canceled and we are unable to send the SIGKILL to the in-container process. +// The goal is to allow for runc to gracefully shutdown when the request context +// is cancelled. +func runcProcessHandle(ctx context.Context, killer procKiller) (*procHandle, context.Context) { + runcCtx, cancel := context.WithCancel(context.Background()) + p := &procHandle{ + ready: make(chan struct{}), + ended: make(chan struct{}), + shutdown: cancel, + killer: killer, + } + // preserve the logger on the context used for the runc process handling + runcCtx = bklog.WithLogger(runcCtx, bklog.G(ctx)) + + go func() { + // Wait for pid + select { + case <-ctx.Done(): + return // nothing to kill + case <-p.ready: + } + + for { + select { + case <-ctx.Done(): + killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second) + if err := p.killer.Kill(killCtx); err != nil { + select { + case <-killCtx.Done(): + timeout() + cancel() + return + default: + } + } + timeout() + select { + case <-time.After(50 * time.Millisecond): + case <-p.ended: + return + } + case <-p.ended: + return + } + } + }() + + return p, runcCtx +} + +// Release will free resources with a procHandle. +func (p *procHandle) Release() { + close(p.ended) + if p.monitorProcess != nil { + p.monitorProcess.Release() + } +} + +// Shutdown should be called after the runc process has exited. This will allow +// the signal handling and tty resize loops to exit, terminating the +// goroutines. +func (p *procHandle) Shutdown() { + if p.shutdown != nil { + p.shutdown() + } +} + +// WaitForReady will wait until we have received the runc pid via the go-runc +// Started channel, or until the request context is canceled. This should +// return without errors before attempting to send signals to the runc process. 
+func (p *procHandle) WaitForReady(ctx context.Context) error { select { case <-ctx.Done(): return ctx.Err() @@ -487,35 +669,37 @@ func (p *startingProcess) WaitForReady(ctx context.Context) error { } } -// WaitForStart will record the pid reported by Runc via the channel. -// We wait for up to 10s for the runc process to start. If the started +// WaitForStart will record the runc pid reported by go-runc via the channel. +// We wait for up to 10s for the runc pid to be reported. If the started // callback is non-nil it will be called after receiving the pid. -func (p *startingProcess) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error { +func (p *procHandle) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error { startedCtx, timeout := context.WithTimeout(ctx, 10*time.Second) defer timeout() - var err error select { case <-startedCtx.Done(): - return errors.New("runc started message never received") - case pid, ok := <-startedCh: + return errors.New("go-runc started message never received") + case runcPid, ok := <-startedCh: if !ok { - return errors.New("runc process failed to send pid") + return errors.New("go-runc failed to send pid") } if started != nil { started() } - p.Process, err = os.FindProcess(pid) + var err error + p.monitorProcess, err = os.FindProcess(runcPid) if err != nil { - return errors.Wrapf(err, "unable to find runc process for pid %d", pid) + // error only possible on non-unix hosts + return errors.Wrapf(err, "failed to find runc process %d", runcPid) } close(p.ready) } return nil } -// handleSignals will wait until the runcProcess is ready then will -// send each signal received on the channel to the process. 
-func handleSignals(ctx context.Context, runcProcess *startingProcess, signals <-chan syscall.Signal) error { +// handleSignals will wait until the procHandle is ready then will +// send each signal received on the channel to the runc process (not directly +// to the in-container process) +func handleSignals(ctx context.Context, runcProcess *procHandle, signals <-chan syscall.Signal) error { if signals == nil { return nil } @@ -528,8 +712,15 @@ func handleSignals(ctx context.Context, runcProcess *startingProcess, signals <- case <-ctx.Done(): return nil case sig := <-signals: - err := runcProcess.Process.Signal(sig) - if err != nil { + if sig == syscall.SIGKILL { + // never send SIGKILL directly to runc, it needs to go to the + // process in-container + if err := runcProcess.killer.Kill(ctx); err != nil { + return err + } + continue + } + if err := runcProcess.monitorProcess.Signal(sig); err != nil { bklog.G(ctx).Errorf("failed to signal %s to process: %s", sig, err) return err } diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go index 447c4a96b9..28955f9a45 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go @@ -8,6 +8,7 @@ import ( runc "github.com/containerd/go-runc" "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/util/bklog" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -17,15 +18,21 @@ var unsupportedConsoleError = errors.New("tty for runc is only supported on linu func updateRuncFieldsForHostOS(runtime *runc.Runc) {} -func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func()) error { +func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), keep bool) error 
{ if process.Meta.Tty { return unsupportedConsoleError } - return w.commonCall(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { + extraArgs := []string{} + if keep { + extraArgs = append(extraArgs, "--keep") + } + killer := newRunProcKiller(w.runc, id) + return w.commonCall(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error { _, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{ - NoPivot: w.noPivot, - Started: started, - IO: io, + NoPivot: w.noPivot, + Started: started, + IO: io, + ExtraArgs: extraArgs, }) return err }) @@ -35,38 +42,47 @@ func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess if process.Meta.Tty { return unsupportedConsoleError } - return w.commonCall(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { + + killer, err := newExecProcKiller(w.runc, id) + if err != nil { + return errors.Wrap(err, "failed to initialize process killer") + } + defer killer.Cleanup() + + return w.commonCall(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error { return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{ Started: started, IO: io, + PidFile: pidfile, }) }) } -type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error +type runcCall func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error // commonCall is the common run/exec logic used for non-linux runtimes. A tty // is only supported for linux, so this really just handles signal propagation // to the started runc process. 
-func (w *runcExecutor) commonCall(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), call runcCall) error { - runcProcess := &startingProcess{ - ready: make(chan struct{}), - } +func (w *runcExecutor) commonCall(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), killer procKiller, call runcCall) error { + runcProcess, ctx := runcProcessHandle(ctx, killer) defer runcProcess.Release() - var eg errgroup.Group - egCtx, cancel := context.WithCancel(ctx) - defer eg.Wait() - defer cancel() + eg, ctx := errgroup.WithContext(ctx) + defer func() { + if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) { + bklog.G(ctx).Errorf("runc process monitoring error: %s", err) + } + }() + defer runcProcess.Shutdown() startedCh := make(chan int, 1) eg.Go(func() error { - return runcProcess.WaitForStart(egCtx, startedCh, started) + return runcProcess.WaitForStart(ctx, startedCh, started) }) eg.Go(func() error { - return handleSignals(egCtx, runcProcess, process.Signal) + return handleSignals(ctx, runcProcess, process.Signal) }) - return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}) + return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, killer.pidfile) } diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go index 15ea812a5a..e2c14950f0 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go @@ -21,50 +21,64 @@ func updateRuncFieldsForHostOS(runtime *runc.Runc) { runtime.PdeathSignal = syscall.SIGKILL // this can still leak the process } -func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func()) error { - return w.callWithIO(ctx, 
id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { +func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), keep bool) error { + killer := newRunProcKiller(w.runc, id) + return w.callWithIO(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error { + extraArgs := []string{} + if keep { + extraArgs = append(extraArgs, "--keep") + } _, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{ - NoPivot: w.noPivot, - Started: started, - IO: io, + NoPivot: w.noPivot, + Started: started, + IO: io, + ExtraArgs: extraArgs, }) return err }) } func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo, started func()) error { - return w.callWithIO(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { + killer, err := newExecProcKiller(w.runc, id) + if err != nil { + return errors.Wrap(err, "failed to initialize process killer") + } + defer killer.Cleanup() + + return w.callWithIO(ctx, id, bundle, process, started, killer, func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error { return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{ Started: started, IO: io, + PidFile: pidfile, }) }) } -type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error +type runcCall func(ctx context.Context, started chan<- int, io runc.IO, pidfile string) error -func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), call runcCall) error { - runcProcess := &startingProcess{ - ready: make(chan struct{}), - } +func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), killer procKiller, call runcCall) error { + runcProcess, ctx := runcProcessHandle(ctx, 
killer) defer runcProcess.Release() - var eg errgroup.Group - egCtx, cancel := context.WithCancel(ctx) - defer eg.Wait() - defer cancel() + eg, ctx := errgroup.WithContext(ctx) + defer func() { + if err := eg.Wait(); err != nil && !errors.Is(err, context.Canceled) { + bklog.G(ctx).Errorf("runc process monitoring error: %s", err) + } + }() + defer runcProcess.Shutdown() startedCh := make(chan int, 1) eg.Go(func() error { - return runcProcess.WaitForStart(egCtx, startedCh, started) + return runcProcess.WaitForStart(ctx, startedCh, started) }) eg.Go(func() error { - return handleSignals(egCtx, runcProcess, process.Signal) + return handleSignals(ctx, runcProcess, process.Signal) }) if !process.Meta.Tty { - return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}) + return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, killer.pidfile) } ptm, ptsName, err := console.NewPty() @@ -84,7 +98,7 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces } pts.Close() ptm.Close() - cancel() // this will shutdown resize and signal loops + runcProcess.Shutdown() err := eg.Wait() if err != nil { bklog.G(ctx).Warningf("error while shutting down tty io: %s", err) @@ -119,13 +133,13 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces } eg.Go(func() error { - err := runcProcess.WaitForReady(egCtx) + err := runcProcess.WaitForReady(ctx) if err != nil { return err } for { select { - case <-egCtx.Done(): + case <-ctx.Done(): return nil case resize := <-process.Resize: err = ptm.Resize(console.WinSize{ @@ -135,7 +149,9 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces if err != nil { bklog.G(ctx).Errorf("failed to resize ptm: %s", err) } - err = runcProcess.Process.Signal(signal.SIGWINCH) + // SIGWINCH must be sent to the runc monitor process, as + // terminal resizing is done in runc. 
+ err = runcProcess.monitorProcess.Signal(signal.SIGWINCH) if err != nil { bklog.G(ctx).Errorf("failed to send SIGWINCH to process: %s", err) } @@ -154,5 +170,5 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces runcIO.stderr = pts } - return call(ctx, startedCh, runcIO) + return call(ctx, startedCh, runcIO, killer.pidfile) } diff --git a/vendor/github.com/moby/buildkit/executor/stubs.go b/vendor/github.com/moby/buildkit/executor/stubs.go index 22a8ac1310..e2ac460e20 100644 --- a/vendor/github.com/moby/buildkit/executor/stubs.go +++ b/vendor/github.com/moby/buildkit/executor/stubs.go @@ -1,17 +1,18 @@ package executor import ( + "context" "errors" "os" "path/filepath" "syscall" "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/system" - "github.com/sirupsen/logrus" ) -func MountStubsCleaner(dir string, mounts []Mount, recursive bool) func() { +func MountStubsCleaner(ctx context.Context, dir string, mounts []Mount, recursive bool) func() { names := []string{"/etc/resolv.conf", "/etc/hosts"} for _, m := range mounts { @@ -72,23 +73,23 @@ func MountStubsCleaner(dir string, mounts []Mount, recursive bool) func() { dir := filepath.Dir(p) dirSt, err := os.Stat(dir) if err != nil { - logrus.WithError(err).Warnf("Failed to stat %q (parent of mount stub %q)", dir, p) + bklog.G(ctx).WithError(err).Warnf("Failed to stat %q (parent of mount stub %q)", dir, p) continue } mtime := dirSt.ModTime() atime, err := system.Atime(dirSt) if err != nil { - logrus.WithError(err).Warnf("Failed to stat atime of %q (parent of mount stub %q)", dir, p) + bklog.G(ctx).WithError(err).Warnf("Failed to stat atime of %q (parent of mount stub %q)", dir, p) atime = mtime } if err := os.Remove(p); err != nil { - logrus.WithError(err).Warnf("Failed to remove mount stub %q", p) + bklog.G(ctx).WithError(err).Warnf("Failed to remove mount stub %q", p) } // Restore the timestamps of the dir if err := os.Chtimes(dir, 
atime, mtime); err != nil { - logrus.WithError(err).Warnf("Failed to restore time time mount stub timestamp (os.Chtimes(%q, %v, %v))", dir, atime, mtime) + bklog.G(ctx).WithError(err).Warnf("Failed to restore time time mount stub timestamp (os.Chtimes(%q, %v, %v))", dir, atime, mtime) } } } diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go b/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go index a41c6039f0..1c4837e36f 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go @@ -20,11 +20,11 @@ import ( ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" spdx_json "github.com/spdx/tools-golang/json" - "github.com/spdx/tools-golang/spdx/common" - spdx "github.com/spdx/tools-golang/spdx/v2_3" + "github.com/spdx/tools-golang/spdx" + "github.com/spdx/tools-golang/spdx/v2/common" ) -var intotoPlatform ocispecs.Platform = ocispecs.Platform{ +var intotoPlatform = ocispecs.Platform{ Architecture: "unknown", OS: "unknown", } @@ -122,7 +122,7 @@ func supplementSBOM(ctx context.Context, s session.Group, target cache.Immutable } func decodeSPDX(dt []byte) (s *spdx.Document, err error) { - doc, err := spdx_json.Load2_3(bytes.NewReader(dt)) + doc, err := spdx_json.Read(bytes.NewReader(dt)) if err != nil { return nil, errors.Wrap(err, "unable to decode spdx") } @@ -134,7 +134,7 @@ func decodeSPDX(dt []byte) (s *spdx.Document, err error) { func encodeSPDX(s *spdx.Document) (dt []byte, err error) { w := bytes.NewBuffer(nil) - err = spdx_json.Save2_3(s, w) + err = spdx_json.Write(s, w) if err != nil { return nil, errors.Wrap(err, "unable to encode spdx") } diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/export.go b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go index 55eaf3ff58..2c2775ac7e 100644 --- 
a/vendor/github.com/moby/buildkit/exporter/containerimage/export.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go @@ -5,19 +5,18 @@ import ( "encoding/base64" "encoding/json" "fmt" + "sort" "strconv" "strings" - "time" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/pkg/epoch" "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/rootfs" - intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/exporter" @@ -33,17 +32,10 @@ import ( "github.com/opencontainers/image-spec/identity" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "golang.org/x/sync/errgroup" ) const ( - keyPush = "push" - keyPushByDigest = "push-by-digest" - keyInsecure = "registry.insecure" - keyUnpack = "unpack" - keyDanglingPrefix = "dangling-name-prefix" - keyNameCanonical = "name-canonical" - keyStore = "store" - // keyUnsafeInternalStoreAllowIncomplete should only be used for tests. This option allows exporting image to the image store // as well as lacking some blobs in the content store. Some integration tests for lazyref behaviour depends on this option. // Ignored when store=false. 
@@ -78,20 +70,19 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp RefCfg: cacheconfig.RefConfig{ Compression: compression.New(compression.Default), }, - BuildInfo: true, ForceInlineAttestations: true, }, store: true, } - opt, err := i.opts.Load(opt) + opt, err := i.opts.Load(ctx, opt) if err != nil { return nil, err } for k, v := range opt { - switch k { - case keyPush: + switch exptypes.ImageExporterOptKey(k) { + case exptypes.OptKeyPush: if v == "" { i.push = true continue @@ -101,7 +92,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.push = b - case keyPushByDigest: + case exptypes.OptKeyPushByDigest: if v == "" { i.pushByDigest = true continue @@ -111,7 +102,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.pushByDigest = b - case keyInsecure: + case exptypes.OptKeyInsecure: if v == "" { i.insecure = true continue @@ -121,7 +112,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.insecure = b - case keyUnpack: + case exptypes.OptKeyUnpack: if v == "" { i.unpack = true continue @@ -131,7 +122,7 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.unpack = b - case keyStore: + case exptypes.OptKeyStore: if v == "" { i.store = true continue @@ -151,9 +142,9 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp return nil, errors.Wrapf(err, "non-bool value specified for %s", k) } i.storeAllowIncomplete = b - case keyDanglingPrefix: + case exptypes.OptKeyDanglingPrefix: i.danglingPrefix = v - case keyNameCanonical: + case exptypes.OptKeyNameCanonical: if v == "" { i.nameCanonical = 
true continue @@ -247,60 +238,73 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source for _, targetName := range targetNames { if e.opt.Images != nil && e.store { tagDone := progress.OneOff(ctx, "naming to "+targetName) - img := images.Image{ - Target: *desc, - CreatedAt: time.Now(), + + // imageClientCtx is used for propagating the epoch to e.opt.Images.Update() and e.opt.Images.Create(). + // + // Ideally, we should be able to propagate the epoch via images.Image.CreatedAt. + // However, due to a bug of containerd, we are temporarily stuck with this workaround. + // https://github.com/containerd/containerd/issues/8322 + imageClientCtx := ctx + if e.opts.Epoch != nil { + imageClientCtx = epoch.WithSourceDateEpoch(imageClientCtx, e.opts.Epoch) } + img := images.Image{ + Target: *desc, + // CreatedAt in images.Images is ignored due to a bug of containerd. + // See the comment lines for imageClientCtx. + } + sfx := []string{""} if nameCanonical { sfx = append(sfx, "@"+desc.Digest.String()) } for _, sfx := range sfx { img.Name = targetName + sfx - if _, err := e.opt.Images.Update(ctx, img); err != nil { + if _, err := e.opt.Images.Update(imageClientCtx, img); err != nil { if !errors.Is(err, errdefs.ErrNotFound) { return nil, nil, tagDone(err) } - if _, err := e.opt.Images.Create(ctx, img); err != nil { + if _, err := e.opt.Images.Create(imageClientCtx, img); err != nil { return nil, nil, tagDone(err) } } } tagDone(nil) - if src.Ref != nil && e.unpack { + if e.unpack { if err := e.unpackImage(ctx, img, src, session.NewGroup(sessionID)); err != nil { return nil, nil, err } } if !e.storeAllowIncomplete { + var refs []cache.ImmutableRef if src.Ref != nil { - remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) - if err != nil { - return nil, nil, err - } - remote := remotes[0] - if unlazier, ok := remote.Provider.(cache.Unlazier); ok { - if err := unlazier.Unlazy(ctx); err != nil { - return nil, 
nil, err - } - } + refs = append(refs, src.Ref) } - if len(src.Refs) > 0 { - for _, r := range src.Refs { - remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + for _, ref := range src.Refs { + refs = append(refs, ref) + } + eg, ctx := errgroup.WithContext(ctx) + for _, ref := range refs { + ref := ref + eg.Go(func() error { + remotes, err := ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) if err != nil { - return nil, nil, err + return err } remote := remotes[0] if unlazier, ok := remote.Provider.(cache.Unlazier); ok { if err := unlazier.Unlazy(ctx); err != nil { - return nil, nil, err + return err } } - } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, nil, err } } } @@ -330,10 +334,18 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source } func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Source, sessionID string, targetName string, dgst digest.Digest) error { + var refs []cache.ImmutableRef + if src.Ref != nil { + refs = append(refs, src.Ref) + } + for _, ref := range src.Refs { + refs = append(refs, ref) + } + annotations := map[digest.Digest]map[string]string{} mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) - if src.Ref != nil { - remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + for _, ref := range refs { + remotes, err := ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) if err != nil { return err } @@ -343,25 +355,36 @@ func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Sou addAnnotations(annotations, desc) } } - if len(src.Refs) > 0 { - for _, r := range src.Refs { - remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) - if err != nil { - return err - } - remote := remotes[0] - for _, desc := range remote.Descriptors { - 
mprovider.Add(desc.Digest, remote.Provider) - addAnnotations(annotations, desc) - } - } - } - - ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto") return push.Push(ctx, e.opt.SessionManager, sessionID, mprovider, e.opt.ImageWriter.ContentStore(), dgst, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations) } func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Image, src *exporter.Source, s session.Group) (err0 error) { + matcher := platforms.Only(platforms.Normalize(platforms.DefaultSpec())) + + ps, err := exptypes.ParsePlatforms(src.Metadata) + if err != nil { + return err + } + matching := []exptypes.Platform{} + for _, p2 := range ps.Platforms { + if matcher.Match(p2.Platform) { + matching = append(matching, p2) + } + } + if len(matching) == 0 { + // current platform was not found, so skip unpacking + return nil + } + sort.SliceStable(matching, func(i, j int) bool { + return matcher.Less(matching[i].Platform, matching[j].Platform) + }) + + ref, _ := src.FindRef(matching[0].ID) + if ref == nil { + // ref has no layers, so nothing to unpack + return nil + } + unpackDone := progress.OneOff(ctx, "unpacking to "+img.Name) defer func() { unpackDone(err0) @@ -379,16 +402,7 @@ func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Imag return err } - topLayerRef := src.Ref - if len(src.Refs) > 0 { - if r, ok := src.Refs[defaultPlatform()]; ok { - topLayerRef = r - } else { - return errors.Errorf("no reference for default platform %s", defaultPlatform()) - } - } - - remotes, err := topLayerRef.GetRemotes(ctx, true, e.opts.RefCfg, false, s) + remotes, err := ref.GetRemotes(ctx, true, e.opts.RefCfg, false, s) if err != nil { return err } @@ -461,12 +475,6 @@ func addAnnotations(m map[digest.Digest]map[string]string, desc ocispecs.Descrip } } -func defaultPlatform() string { - // Use normalized platform string to avoid the mismatch with platform options which - // are normalized using 
platforms.Normalize() - return platforms.Format(platforms.Normalize(platforms.DefaultSpec())) -} - func NewDescriptorReference(desc ocispecs.Descriptor, release func(context.Context) error) exporter.DescriptorReference { return &descriptorReference{ desc: desc, diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go new file mode 100644 index 0000000000..c432218499 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go @@ -0,0 +1,75 @@ +package exptypes + +import commonexptypes "github.com/moby/buildkit/exporter/exptypes" + +type ImageExporterOptKey string + +// Options keys supported by the image exporter output. +var ( + // Name of the image. + // Value: string + OptKeyName ImageExporterOptKey = "name" + + // Push after creating image. + // Value: bool + OptKeyPush ImageExporterOptKey = "push" + + // Push unnamed image. + // Value: bool + OptKeyPushByDigest ImageExporterOptKey = "push-by-digest" + + // Allow pushing to insecure HTTP registry. + // Value: bool + OptKeyInsecure ImageExporterOptKey = "registry.insecure" + + // Unpack image after it's created (containerd). + // Value: bool + OptKeyUnpack ImageExporterOptKey = "unpack" + + // Fallback image name prefix if image name isn't provided. + // If used, image will be named as @ + // Value: string + OptKeyDanglingPrefix ImageExporterOptKey = "dangling-name-prefix" + + // Creates additional image name with format @ + // Value: bool + OptKeyNameCanonical ImageExporterOptKey = "name-canonical" + + // Store the resulting image along with all of the content it references. + // Ignored if the worker doesn't have image store (e.g. OCI worker). + // Value: bool + OptKeyStore ImageExporterOptKey = "store" + + // Use OCI mediatypes instead of Docker in JSON configs. + // Value: bool + OptKeyOCITypes ImageExporterOptKey = "oci-mediatypes" + + // Force attestation to be attached. 
+ // Value: bool + OptKeyForceInlineAttestations ImageExporterOptKey = "attestation-inline" + + // Mark layers as non-distributable if they are found to use a + // non-distributable media type. When this option is not set, the exporter + // will change the media type of the layer to a distributable one. + // Value: bool + OptKeyPreferNondistLayers ImageExporterOptKey = "prefer-nondist-layers" + + // Clamp produced timestamps. For more information see the + // SOURCE_DATE_EPOCH specification. + // Value: int (number of seconds since Unix epoch) + OptKeySourceDateEpoch ImageExporterOptKey = ImageExporterOptKey(commonexptypes.OptKeySourceDateEpoch) + + // Compression type for newly created and cached layers. + // estargz should be used with OptKeyOCITypes set to true. + // Value: string + OptKeyLayerCompression ImageExporterOptKey = "compression" + + // Force compression on all (including existing) layers. + // Value: bool + OptKeyForceCompression ImageExporterOptKey = "force-compression" + + // Compression level + // Value: int (0-9) for gzip and estargz + // Value: int (0-22) for zstd + OptKeyCompressionLevel ImageExporterOptKey = "compression-level" +) diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go index 4531360afa..c4d5721ea6 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go @@ -11,9 +11,7 @@ const ( ExporterImageConfigDigestKey = "containerimage.config.digest" ExporterImageDescriptorKey = "containerimage.descriptor" ExporterInlineCache = "containerimage.inlinecache" - ExporterBuildInfo = "containerimage.buildinfo" // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md ExporterPlatformsKey = "refs.platforms" - ExporterEpochKey = "source.date.epoch" ) // KnownRefMetadataKeys are 
the subset of exporter keys that can be suffixed by @@ -21,7 +19,6 @@ const ( var KnownRefMetadataKeys = []string{ ExporterImageConfigKey, ExporterInlineCache, - ExporterBuildInfo, } type Platforms struct { diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go b/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go index a35d811d55..1af194b506 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go @@ -19,9 +19,10 @@ type HealthConfig struct { Test []string `json:",omitempty"` // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period. // Retries is the number of consecutive failures needed to consider a container as unhealthy. // Zero means inherit. 
@@ -33,7 +34,6 @@ type ImageConfig struct { ocispecs.ImageConfig Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) // NetworkDisabled bool `json:",omitempty"` // Is network disabled // MacAddress string `json:",omitempty"` // Mac Address of the container diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go b/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go index 4948eaad24..791f268afd 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go @@ -1,30 +1,16 @@ package containerimage import ( + "context" "strconv" "time" cacheconfig "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/exporter/util/epoch" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - keyImageName = "name" - keyLayerCompression = "compression" - keyCompressionLevel = "compression-level" - keyForceCompression = "force-compression" - keyOCITypes = "oci-mediatypes" - keyBuildInfo = "buildinfo" - keyBuildInfoAttrs = "buildinfo-attrs" - keyForceInlineAttestations = "attestation-inline" - - // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was - // already found to use a non-distributable media type. - // When this option is not set, the exporter will change the media type of the layer to a distributable one. 
- keyPreferNondistLayers = "prefer-nondist-layers" ) type ImageCommitOpts struct { @@ -35,12 +21,9 @@ type ImageCommitOpts struct { Epoch *time.Time ForceInlineAttestations bool // force inline attestations to be attached - - BuildInfo bool // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md - BuildInfoAttrs bool // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md } -func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) { +func (c *ImageCommitOpts) Load(ctx context.Context, opt map[string]string) (map[string]string, error) { rest := make(map[string]string) as, optb, err := ParseAnnotations(toBytesMap(opt)) @@ -54,32 +37,20 @@ func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) return nil, err } + if c.RefCfg.Compression, err = compression.ParseAttributes(opt); err != nil { + return nil, err + } + for k, v := range opt { var err error - switch k { - case keyImageName: + switch exptypes.ImageExporterOptKey(k) { + case exptypes.OptKeyName: c.ImageName = v - case keyLayerCompression: - c.RefCfg.Compression.Type, err = compression.Parse(v) - case keyCompressionLevel: - ii, err2 := strconv.ParseInt(v, 10, 64) - if err != nil { - err = errors.Wrapf(err2, "non-int value %s specified for %s", v, k) - break - } - v := int(ii) - c.RefCfg.Compression.Level = &v - case keyForceCompression: - err = parseBoolWithDefault(&c.RefCfg.Compression.Force, k, v, true) - case keyOCITypes: + case exptypes.OptKeyOCITypes: err = parseBoolWithDefault(&c.OCITypes, k, v, true) - case keyBuildInfo: - err = parseBoolWithDefault(&c.BuildInfo, k, v, true) - case keyBuildInfoAttrs: - err = parseBoolWithDefault(&c.BuildInfoAttrs, k, v, false) - case keyForceInlineAttestations: + case exptypes.OptKeyForceInlineAttestations: err = parseBool(&c.ForceInlineAttestations, k, v) - case keyPreferNondistLayers: + case 
exptypes.OptKeyPreferNondistLayers: err = parseBool(&c.RefCfg.PreferNonDistributable, k, v) default: rest[k] = v @@ -91,11 +62,11 @@ func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) } if c.RefCfg.Compression.Type.OnlySupportOCITypes() { - c.EnableOCITypes(c.RefCfg.Compression.Type.String()) + c.EnableOCITypes(ctx, c.RefCfg.Compression.Type.String()) } if c.RefCfg.Compression.Type.NeedsForceCompression() { - c.EnableForceCompression(c.RefCfg.Compression.Type.String()) + c.EnableForceCompression(ctx, c.RefCfg.Compression.Type.String()) } c.Annotations = c.Annotations.Merge(as) @@ -103,25 +74,25 @@ func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) return rest, nil } -func (c *ImageCommitOpts) EnableOCITypes(reason string) { +func (c *ImageCommitOpts) EnableOCITypes(ctx context.Context, reason string) { if !c.OCITypes { message := "forcibly turning on oci-mediatype mode" if reason != "" { message += " for " + reason } - logrus.Warn(message) + bklog.G(ctx).Warn(message) c.OCITypes = true } } -func (c *ImageCommitOpts) EnableForceCompression(reason string) { +func (c *ImageCommitOpts) EnableForceCompression(ctx context.Context, reason string) { if !c.RefCfg.Compression.Force { message := "forcibly turning on force-compression mode" if reason != "" { message += " for " + reason } - logrus.Warn(message) + bklog.G(ctx).Warn(message) c.RefCfg.Compression.Force = true } diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go index 4cccd9db51..186f415b18 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go @@ -26,8 +26,6 @@ import ( "github.com/moby/buildkit/solver/result" attestationTypes "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/bklog" - "github.com/moby/buildkit/util/buildinfo" - binfotypes 
"github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/purl" @@ -36,6 +34,7 @@ import ( digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/package-url/packageurl-go" "github.com/pkg/errors" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -102,7 +101,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session } } if len(a.Index)+len(a.IndexDescriptor)+len(a.ManifestDescriptor) > 0 { - opts.EnableOCITypes("annotations") + opts.EnableOCITypes(ctx, "annotations") } } @@ -127,15 +126,6 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session return nil, err } - var dtbi []byte - if opts.BuildInfo { - if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{ - RemoveAttrs: !opts.BuildInfoAttrs, - }); err != nil { - return nil, err - } - } - annotations := opts.Annotations.Platform(nil) if len(annotations.Index) > 0 || len(annotations.IndexDescriptor) > 0 { return nil, errors.Errorf("index annotations not supported for single platform export") @@ -143,7 +133,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p) inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p) - mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, &remotes[0], annotations, inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID)) + mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, &remotes[0], annotations, inlineCache, opts.Epoch, session.NewGroup(sessionID)) if err != nil { return nil, err } @@ -159,7 +149,7 @@ func (ic *ImageWriter) 
Commit(ctx context.Context, inp *exporter.Source, session } if len(inp.Attestations) > 0 { - opts.EnableOCITypes("attestations") + opts.EnableOCITypes(ctx, "attestations") } refs := make([]cache.ImmutableRef, 0, len(inp.Refs)) @@ -178,19 +168,11 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session return nil, err } - idx := struct { - // MediaType is reserved in the OCI spec but - // excluded from go types. - MediaType string `json:"mediaType,omitempty"` - - ocispecs.Index - }{ - MediaType: ocispecs.MediaTypeImageIndex, - Index: ocispecs.Index{ - Annotations: opts.Annotations.Platform(nil).Index, - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, + idx := ocispecs.Index{ + MediaType: ocispecs.MediaTypeImageIndex, + Annotations: opts.Annotations.Platform(nil).Index, + Versioned: specs.Versioned{ + SchemaVersion: 2, }, } @@ -210,15 +192,6 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p) inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p) - var dtbi []byte - if opts.BuildInfo { - if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{ - RemoveAttrs: !opts.BuildInfoAttrs, - }); err != nil { - return nil, err - } - } - remote := &remotes[remotesMap[p.ID]] if remote == nil { remote = &solver.Remote{ @@ -226,7 +199,7 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, session } } - desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID)) + desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCache, opts.Epoch, session.NewGroup(sessionID)) if err != nil { return nil, err } @@ -263,7 +236,7 @@ func (ic *ImageWriter) Commit(ctx 
context.Context, inp *exporter.Source, session if name == "" { continue } - pl, err := purl.RefToPURL(name, &p.Platform) + pl, err := purl.RefToPURL(packageurl.TypeDocker, name, &p.Platform) if err != nil { return nil, err } @@ -350,7 +323,7 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefC return out, err } -func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, buildInfo []byte, epoch *time.Time, sg session.Group) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { +func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, epoch *time.Time, sg session.Group) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { if len(config) == 0 { var err error config, err = defaultImageConfig() @@ -369,7 +342,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima return nil, nil, err } - config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, buildInfo, epoch) + config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, epoch) if err != nil { return nil, nil, err } @@ -386,24 +359,16 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima configType = images.MediaTypeDockerSchema2Config } - mfst := struct { - // MediaType is reserved in the OCI spec but - // excluded from go types. 
- MediaType string `json:"mediaType,omitempty"` - - ocispecs.Manifest - }{ - MediaType: manifestType, - Manifest: ocispecs.Manifest{ - Annotations: annotations.Manifest, - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: ocispecs.Descriptor{ - Digest: configDigest, - Size: int64(len(config)), - MediaType: configType, - }, + mfst := ocispecs.Manifest{ + MediaType: manifestType, + Annotations: annotations.Manifest, + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, }, } @@ -411,9 +376,10 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *Ima "containerd.io/gc.ref.content.0": configDigest.String(), } - for _, desc := range remote.Descriptors { + for i, desc := range remote.Descriptors { desc.Annotations = RemoveInternalLayerAnnotations(desc.Annotations, opts.OCITypes) mfst.Layers = append(mfst.Layers, desc) + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String() } mfstJSON, err := json.MarshalIndent(mfst, "", " ") @@ -473,7 +439,7 @@ func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *Ima } digest := digest.FromBytes(data) desc := ocispecs.Descriptor{ - MediaType: attestationTypes.MediaTypeDockerSchema2AttestationType, + MediaType: intoto.PayloadType, Digest: digest, Size: int64(len(data)), Annotations: map[string]string{ @@ -499,23 +465,15 @@ func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *Ima MediaType: configType, } - mfst := struct { - // MediaType is reserved in the OCI spec but - // excluded from go types. 
- MediaType string `json:"mediaType,omitempty"` - - ocispecs.Manifest - }{ + mfst := ocispecs.Manifest{ MediaType: manifestType, - Manifest: ocispecs.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: ocispecs.Descriptor{ - Digest: configDigest, - Size: int64(len(config)), - MediaType: configType, - }, + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, }, } @@ -610,7 +568,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) { return config.History, nil } -func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, buildInfo []byte, epoch *time.Time) ([]byte, error) { +func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, epoch *time.Time) ([]byte, error) { m := map[string]json.RawMessage{} if err := json.Unmarshal(dt, &m); err != nil { return nil, errors.Wrap(err, "failed to parse image config for patch") @@ -678,16 +636,6 @@ func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs m["moby.buildkit.cache.v0"] = dt } - if buildInfo != nil { - dt, err := json.Marshal(buildInfo) - if err != nil { - return nil, err - } - m[binfotypes.ImageConfigField] = dt - } else { - delete(m, binfotypes.ImageConfigField) - } - dt, err = json.Marshal(m) return dt, errors.Wrap(err, "failed to marshal config after patch") } @@ -774,7 +722,7 @@ func normalizeLayersAndHistory(ctx context.Context, remote *solver.Remote, histo } // convert between oci and docker media types (or vice versa) if needed - remote.Descriptors = compression.ConvertAllLayerMediaTypes(oci, remote.Descriptors...) + remote.Descriptors = compression.ConvertAllLayerMediaTypes(ctx, oci, remote.Descriptors...) 
return remote, history } diff --git a/vendor/github.com/moby/buildkit/exporter/exptypes/keys.go b/vendor/github.com/moby/buildkit/exporter/exptypes/keys.go new file mode 100644 index 0000000000..4b568154ff --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/exptypes/keys.go @@ -0,0 +1,15 @@ +package exptypes + +const ( + ExporterEpochKey = "source.date.epoch" +) + +type ExporterOptKey string + +// Options keys supported by all exporters. +var ( + // Clamp produced timestamps. For more information see the + // SOURCE_DATE_EPOCH specification. + // Value: int (number of seconds since Unix epoch) + OptKeySourceDateEpoch ExporterOptKey = "source-date-epoch" +) diff --git a/vendor/github.com/moby/buildkit/exporter/local/export.go b/vendor/github.com/moby/buildkit/exporter/local/export.go index 7d08b172e0..771b7aaf22 100644 --- a/vendor/github.com/moby/buildkit/exporter/local/export.go +++ b/vendor/github.com/moby/buildkit/exporter/local/export.go @@ -4,6 +4,7 @@ import ( "context" "os" "strings" + "sync" "time" "github.com/moby/buildkit/cache" @@ -20,10 +21,6 @@ import ( "golang.org/x/time/rate" ) -const ( - keyAttestationPrefix = "attestation-prefix" -) - type Opt struct { SessionManager *session.Manager } @@ -39,23 +36,12 @@ func New(opt Opt) (exporter.Exporter, error) { } func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { - tm, _, err := epoch.ParseExporterAttrs(opt) - if err != nil { - return nil, err - } - i := &localExporterInstance{ localExporter: e, - opts: CreateFSOpts{ - Epoch: tm, - }, } - - for k, v := range opt { - switch k { - case keyAttestationPrefix: - i.opts.AttestationPrefix = v - } + _, err := i.opts.Load(opt) + if err != nil { + return nil, err } return i, nil @@ -107,6 +93,9 @@ func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source now := time.Now().Truncate(time.Second) + visitedPath := map[string]string{} + var visitedMu sync.Mutex + export := 
func(ctx context.Context, k string, ref cache.ImmutableRef, attestations []exporter.Attestation) func() error { return func() error { outputFS, cleanup, err := CreateFS(ctx, sessionID, k, ref, attestations, now, e.opts) @@ -117,20 +106,43 @@ func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source defer cleanup() } + if !e.opts.PlatformSplit { + // check for duplicate paths + err = outputFS.Walk(ctx, func(p string, fi os.FileInfo, err error) error { + if fi.IsDir() { + return nil + } + if err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + visitedMu.Lock() + defer visitedMu.Unlock() + if vp, ok := visitedPath[p]; ok { + return errors.Errorf("cannot overwrite %s from %s with %s when split option is disabled", p, vp, k) + } + visitedPath[p] = k + return nil + }) + if err != nil { + return err + } + } + lbl := "copying files" if isMap { lbl += " " + k - st := fstypes.Stat{ - Mode: uint32(os.ModeDir | 0755), - Path: strings.Replace(k, "/", "_", -1), - } - if e.opts.Epoch != nil { - st.ModTime = e.opts.Epoch.UnixNano() - } - - outputFS, err = fsutil.SubDirFS([]fsutil.Dir{{FS: outputFS, Stat: st}}) - if err != nil { - return err + if e.opts.PlatformSplit { + st := fstypes.Stat{ + Mode: uint32(os.ModeDir | 0755), + Path: strings.Replace(k, "/", "_", -1), + } + if e.opts.Epoch != nil { + st.ModTime = e.opts.Epoch.UnixNano() + } + outputFS, err = fsutil.SubDirFS([]fsutil.Dir{{FS: outputFS, Stat: st}}) + if err != nil { + return err + } } } diff --git a/vendor/github.com/moby/buildkit/exporter/local/fs.go b/vendor/github.com/moby/buildkit/exporter/local/fs.go index c5a524aae3..d8e4703ac1 100644 --- a/vendor/github.com/moby/buildkit/exporter/local/fs.go +++ b/vendor/github.com/moby/buildkit/exporter/local/fs.go @@ -3,11 +3,13 @@ package local import ( "context" "encoding/json" + "fmt" "io" "io/fs" "os" "path" "strconv" + "strings" "time" "github.com/docker/docker/pkg/idtools" @@ -15,6 +17,7 @@ import ( "github.com/moby/buildkit/cache" 
"github.com/moby/buildkit/exporter" "github.com/moby/buildkit/exporter/attestation" + "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/result" @@ -25,9 +28,45 @@ import ( fstypes "github.com/tonistiigi/fsutil/types" ) +const ( + keyAttestationPrefix = "attestation-prefix" + // keyPlatformSplit is an exporter option which can be used to split result + // in subfolders when multiple platform references are exported. + keyPlatformSplit = "platform-split" +) + type CreateFSOpts struct { Epoch *time.Time AttestationPrefix string + PlatformSplit bool +} + +func (c *CreateFSOpts) Load(opt map[string]string) (map[string]string, error) { + rest := make(map[string]string) + c.PlatformSplit = true + + var err error + c.Epoch, opt, err = epoch.ParseExporterAttrs(opt) + if err != nil { + return nil, err + } + + for k, v := range opt { + switch k { + case keyAttestationPrefix: + c.AttestationPrefix = v + case keyPlatformSplit: + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value for %s: %s", keyPlatformSplit, v) + } + c.PlatformSplit = b + default: + rest[k] = v + } + } + + return rest, nil } func CreateFS(ctx context.Context, sessionID string, k string, ref cache.ImmutableRef, attestations []exporter.Attestation, defaultTime time.Time, opt CreateFSOpts) (fsutil.FS, func() error, error) { @@ -138,6 +177,11 @@ func CreateFS(ctx context.Context, sessionID string, k string, ref cache.Immutab } name := opt.AttestationPrefix + path.Base(attestations[i].Path) + if !opt.PlatformSplit { + nameExt := path.Ext(name) + namBase := strings.TrimSuffix(name, nameExt) + name = fmt.Sprintf("%s.%s%s", namBase, strings.Replace(k, "/", "_", -1), nameExt) + } if _, ok := names[name]; ok { return nil, nil, errors.Errorf("duplicate attestation path name %s", name) } diff --git a/vendor/github.com/moby/buildkit/exporter/oci/export.go 
b/vendor/github.com/moby/buildkit/exporter/oci/export.go index 60982f4daf..81ac7857de 100644 --- a/vendor/github.com/moby/buildkit/exporter/oci/export.go +++ b/vendor/github.com/moby/buildkit/exporter/oci/export.go @@ -11,9 +11,7 @@ import ( archiveexporter "github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/leases" - "github.com/containerd/containerd/remotes" "github.com/docker/distribution/reference" - intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/exporter" @@ -29,6 +27,7 @@ import ( "github.com/moby/buildkit/util/progress" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "golang.org/x/sync/errgroup" "google.golang.org/grpc/codes" ) @@ -67,12 +66,11 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp RefCfg: cacheconfig.RefConfig{ Compression: compression.New(compression.Default), }, - BuildInfo: true, - OCITypes: e.opt.Variant == VariantOCI, + OCITypes: e.opt.Variant == VariantOCI, }, } - opt, err := i.opts.Load(opt) + opt, err := i.opts.Load(ctx, opt) if err != nil { return nil, err } @@ -208,40 +206,36 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source return nil, nil, err } - mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) + var refs []cache.ImmutableRef if src.Ref != nil { - remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) - if err != nil { - return nil, nil, err - } - remote := remotes[0] - // unlazy before tar export as the tar writer does not handle - // layer blobs in parallel (whereas unlazy does) - if unlazier, ok := remote.Provider.(cache.Unlazier); ok { - if err := unlazier.Unlazy(ctx); err != nil { - return nil, nil, err - } - } - for _, desc := range remote.Descriptors { - mprovider.Add(desc.Digest, remote.Provider) - } + refs 
= append(refs, src.Ref) } - if len(src.Refs) > 0 { - for _, r := range src.Refs { - remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + for _, ref := range src.Refs { + refs = append(refs, ref) + } + eg, egCtx := errgroup.WithContext(ctx) + mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) + for _, ref := range refs { + ref := ref + eg.Go(func() error { + remotes, err := ref.GetRemotes(egCtx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) if err != nil { - return nil, nil, err + return err } remote := remotes[0] if unlazier, ok := remote.Provider.(cache.Unlazier); ok { - if err := unlazier.Unlazy(ctx); err != nil { - return nil, nil, err + if err := unlazier.Unlazy(egCtx); err != nil { + return err } } for _, desc := range remote.Descriptors { mprovider.Add(desc.Digest, remote.Provider) } - } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, nil, err } if e.tar { @@ -267,7 +261,6 @@ func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source } report(nil) } else { - ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto") store := sessioncontent.NewCallerStore(caller, "export") if err != nil { return nil, nil, err diff --git a/vendor/github.com/moby/buildkit/exporter/tar/export.go b/vendor/github.com/moby/buildkit/exporter/tar/export.go index 4d136c89c1..7259f6b24a 100644 --- a/vendor/github.com/moby/buildkit/exporter/tar/export.go +++ b/vendor/github.com/moby/buildkit/exporter/tar/export.go @@ -3,7 +3,6 @@ package local import ( "context" "os" - "strconv" "strings" "time" @@ -20,15 +19,6 @@ import ( fstypes "github.com/tonistiigi/fsutil/types" ) -const ( - attestationPrefixKey = "attestation-prefix" - - // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was - // already found to use a non-distributable media type. 
- // When this option is not set, the exporter will change the media type of the layer to a distributable one. - preferNondistLayersKey = "prefer-nondist-layers" -) - type Opt struct { SessionManager *session.Manager } @@ -45,33 +35,18 @@ func New(opt Opt) (exporter.Exporter, error) { func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { li := &localExporterInstance{localExporter: e} - - tm, opt, err := epoch.ParseExporterAttrs(opt) + _, err := li.opts.Load(opt) if err != nil { return nil, err } - li.opts.Epoch = tm - - for k, v := range opt { - switch k { - case preferNondistLayersKey: - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value for %s: %s", preferNondistLayersKey, v) - } - li.preferNonDist = b - case attestationPrefixKey: - li.opts.AttestationPrefix = v - } - } + _ = opt return li, nil } type localExporterInstance struct { *localExporter - opts local.CreateFSOpts - preferNonDist bool + opts local.CreateFSOpts } func (e *localExporterInstance) Name() string { diff --git a/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go b/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go index 63f806e1b7..d5a0146081 100644 --- a/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go +++ b/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go @@ -5,14 +5,12 @@ import ( "time" "github.com/moby/buildkit/exporter" - "github.com/moby/buildkit/exporter/containerimage/exptypes" + commonexptypes "github.com/moby/buildkit/exporter/exptypes" "github.com/pkg/errors" ) const ( frontendSourceDateEpochArg = "build-arg:SOURCE_DATE_EPOCH" - - KeySourceDateEpoch = "source-date-epoch" ) func ParseBuildArgs(opt map[string]string) (string, bool) { @@ -27,7 +25,7 @@ func ParseExporterAttrs(opt map[string]string) (*time.Time, map[string]string, e for k, v := range opt { switch k { - case KeySourceDateEpoch: + case 
string(commonexptypes.OptKeySourceDateEpoch): var err error tm, err = parseTime(k, v) if err != nil { @@ -42,7 +40,7 @@ func ParseExporterAttrs(opt map[string]string) (*time.Time, map[string]string, e } func ParseSource(inp *exporter.Source) (*time.Time, bool, error) { - if v, ok := inp.Metadata[exptypes.ExporterEpochKey]; ok { + if v, ok := inp.Metadata[commonexptypes.ExporterEpochKey]; ok { epoch, err := parseTime("", string(v)) if err != nil { return nil, false, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH from frontend: %q", v) diff --git a/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go b/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go index 113797b213..c52229c284 100644 --- a/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go +++ b/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go @@ -31,14 +31,14 @@ const ( // build-contexts or multi-stage builds. Handling these separately allows the // scanner to optionally ignore these or to mark them as such in the // attestation. 
-type Scanner func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[llb.State], error) +type Scanner func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[*llb.State], error) -func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scanner string) (Scanner, error) { +func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scanner string, resolveOpt llb.ResolveImageConfigOpt) (Scanner, error) { if scanner == "" { return nil, nil } - _, dt, err := resolver.ResolveImageConfig(ctx, scanner, llb.ResolveImageConfigOpt{}) + scanner, _, dt, err := resolver.ResolveImageConfig(ctx, scanner, resolveOpt) if err != nil { return nil, err } @@ -55,7 +55,7 @@ func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scan return nil, errors.Errorf("scanner %s does not have cmd", scanner) } - return func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[llb.State], error) { + return func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[*llb.State], error) { var env []string env = append(env, cfg.Config.Env...) 
env = append(env, "BUILDKIT_SCAN_DESTINATION="+outDir) @@ -86,9 +86,9 @@ func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scan } stsbom := runscan.AddMount(outDir, llb.Scratch()) - return result.Attestation[llb.State]{ + return result.Attestation[*llb.State]{ Kind: gatewaypb.AttestationKindBundle, - Ref: stsbom, + Ref: &stsbom, Metadata: map[string][]byte{ result.AttestationReasonKey: []byte(result.AttestationReasonSBOM), result.AttestationSBOMCore: []byte(CoreSBOMName), @@ -100,7 +100,7 @@ func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scan }, nil } -func HasSBOM[T any](res *result.Result[T]) bool { +func HasSBOM[T comparable](res *result.Result[T]) bool { for _, as := range res.Attestations { for _, a := range as { if a.InToto.PredicateType == intoto.PredicateSPDX { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go index aafd9c9a73..40ab3de2c0 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go @@ -1,31 +1,18 @@ package builder import ( - "archive/tar" - "bytes" "context" - "encoding/csv" - "encoding/json" - "fmt" - "net" - "path" - "regexp" - "strconv" "strings" - "time" + "sync" "github.com/containerd/containerd/platforms" - "github.com/docker/distribution/reference" - "github.com/docker/go-units" - controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/containerimage/image" "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/frontend/attestations" "github.com/moby/buildkit/frontend/attestations/sbom" "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" - "github.com/moby/buildkit/frontend/dockerfile/dockerignore" 
"github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/frontend/dockerui" "github.com/moby/buildkit/frontend/gateway/client" gwpb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/frontend/subrequests/outline" @@ -33,352 +20,32 @@ import ( "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/solver/result" - "github.com/moby/buildkit/util/gitutil" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "golang.org/x/sync/errgroup" ) const ( - DefaultLocalNameContext = "context" - DefaultLocalNameDockerfile = "dockerfile" - defaultDockerfileName = "Dockerfile" - dockerignoreFilename = ".dockerignore" - - buildArgPrefix = "build-arg:" - labelPrefix = "label:" - contextPrefix = "context:" - inputMetadataPrefix = "input-metadata:" - - keyTarget = "target" - keyFilename = "filename" - keyCacheFrom = "cache-from" // for registry only. deprecated in favor of keyCacheImports - keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry - keyCgroupParent = "cgroup-parent" - keyContextSubDir = "contextsubdir" - keyForceNetwork = "force-network-mode" - keyGlobalAddHosts = "add-hosts" - keyHostname = "hostname" - keyImageResolveMode = "image-resolve-mode" - keyMultiPlatform = "multi-platform" - keyNameContext = "contextkey" - keyNameDockerfile = "dockerfilekey" - keyNoCache = "no-cache" - keyShmSize = "shm-size" - keyTargetPlatform = "platform" - keyUlimit = "ulimit" - keyRequestID = "requestid" - // Don't forget to update frontend documentation if you add // a new build-arg: frontend/dockerfile/docs/reference.md - keyCacheNSArg = "build-arg:BUILDKIT_CACHE_MOUNT_NS" - keyContextKeepGitDirArg = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" - keyHostnameArg = "build-arg:BUILDKIT_SANDBOX_HOSTNAME" - keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM" - keySyntaxArg = "build-arg:BUILDKIT_SYNTAX" - keySourceDateEpoch = 
"build-arg:SOURCE_DATE_EPOCH" + keySyntaxArg = "build-arg:BUILDKIT_SYNTAX" ) -var httpPrefix = regexp.MustCompile(`^https?://`) - func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { - opts := c.BuildOpts().Opts - caps := c.BuildOpts().LLBCaps - gwcaps := c.BuildOpts().Caps - - if err := caps.Supports(pb.CapFileBase); err != nil { - return nil, errors.Wrap(err, "needs BuildKit 0.5 or later") + bc, err := dockerui.NewClient(c) + if err != nil { + return nil, err } - if opts["override-copy-image"] != "" { - return nil, errors.New("support for \"override-copy-image\" was removed in BuildKit 0.11") - } - if v, ok := opts["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok { - if b, err := strconv.ParseBool(v); err == nil && b { - return nil, errors.New("support for \"build-arg:BUILDKIT_DISABLE_FILEOP\" was removed in BuildKit 0.11") - } - } - + opts := bc.BuildOpts().Opts allowForward, capsError := validateCaps(opts["frontend.caps"]) if !allowForward && capsError != nil { return nil, capsError } - marshalOpts := []llb.ConstraintsOpt{llb.WithCaps(caps)} - - localNameContext := DefaultLocalNameContext - if v, ok := opts[keyNameContext]; ok { - localNameContext = v - } - - forceLocalDockerfile := false - localNameDockerfile := DefaultLocalNameDockerfile - if v, ok := opts[keyNameDockerfile]; ok { - forceLocalDockerfile = true - localNameDockerfile = v - } - - defaultBuildPlatform := platforms.DefaultSpec() - if workers := c.BuildOpts().Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 { - defaultBuildPlatform = workers[0].Platforms[0] - } - - buildPlatforms := []ocispecs.Platform{defaultBuildPlatform} - targetPlatforms := []*ocispecs.Platform{nil} - if v := opts[keyTargetPlatform]; v != "" { - var err error - targetPlatforms, err = parsePlatforms(v) - if err != nil { - return nil, err - } - } - - resolveMode, err := parseResolveMode(opts[keyImageResolveMode]) + src, err := bc.ReadEntrypoint(ctx, "Dockerfile") if err != nil { return nil, err } - 
extraHosts, err := parseExtraHosts(opts[keyGlobalAddHosts]) - if err != nil { - return nil, errors.Wrap(err, "failed to parse additional hosts") - } - - shmSize, err := parseShmSize(opts[keyShmSize]) - if err != nil { - return nil, errors.Wrap(err, "failed to parse shm size") - } - - ulimit, err := parseUlimits(opts[keyUlimit]) - if err != nil { - return nil, errors.Wrap(err, "failed to parse ulimit") - } - - defaultNetMode, err := parseNetMode(opts[keyForceNetwork]) - if err != nil { - return nil, err - } - - filename := opts[keyFilename] - if filename == "" { - filename = defaultDockerfileName - } - - var ignoreCache []string - if v, ok := opts[keyNoCache]; ok { - if v == "" { - ignoreCache = []string{} // means all stages - } else { - ignoreCache = strings.Split(v, ",") - } - } - - name := "load build definition from " + filename - - filenames := []string{filename, filename + ".dockerignore"} - - // dockerfile is also supported casing moby/moby#10858 - if path.Base(filename) == defaultDockerfileName { - filenames = append(filenames, path.Join(path.Dir(filename), strings.ToLower(defaultDockerfileName))) - } - - src := llb.Local(localNameDockerfile, - llb.FollowPaths(filenames), - llb.SessionID(c.BuildOpts().SessionID), - llb.SharedKeyHint(localNameDockerfile), - dockerfile2llb.WithInternalName(name), - llb.Differ(llb.DiffNone, false), - ) - - var buildContext *llb.State - isNotLocalContext := false - keepGit := false - if v, err := strconv.ParseBool(opts[keyContextKeepGitDirArg]); err == nil { - keepGit = v - } - if st, ok := detectGitContext(opts[localNameContext], keepGit); ok { - if !forceLocalDockerfile { - src = *st - } - buildContext = st - } else if httpPrefix.MatchString(opts[localNameContext]) { - httpContext := llb.HTTP(opts[localNameContext], llb.Filename("context"), dockerfile2llb.WithInternalName("load remote build context")) - def, err := httpContext.Marshal(ctx, marshalOpts...) 
- if err != nil { - return nil, errors.Wrapf(err, "failed to marshal httpcontext") - } - res, err := c.Solve(ctx, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return nil, errors.Wrapf(err, "failed to resolve httpcontext") - } - - ref, err := res.SingleRef() - if err != nil { - return nil, err - } - - dt, err := ref.ReadFile(ctx, client.ReadRequest{ - Filename: "context", - Range: &client.FileRange{ - Length: 1024, - }, - }) - if err != nil { - return nil, errors.Wrapf(err, "failed to read downloaded context") - } - if isArchive(dt) { - bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{ - AttemptUnpack: true, - })) - if !forceLocalDockerfile { - src = bc - } - buildContext = &bc - } else { - filename = "context" - if !forceLocalDockerfile { - src = httpContext - } - buildContext = &httpContext - isNotLocalContext = true - } - } else if (&gwcaps).Supports(gwpb.CapFrontendInputs) == nil { - inputs, err := c.Inputs(ctx) - if err != nil { - return nil, errors.Wrapf(err, "failed to get frontend inputs") - } - - if !forceLocalDockerfile { - inputDockerfile, ok := inputs[DefaultLocalNameDockerfile] - if ok { - src = inputDockerfile - } - } - - inputCtx, ok := inputs[DefaultLocalNameContext] - if ok { - buildContext = &inputCtx - isNotLocalContext = true - } - } - - if buildContext != nil { - if sub, ok := opts[keyContextSubDir]; ok { - buildContext = scopeToSubDir(buildContext, sub) - } - } - - def, err := src.Marshal(ctx, marshalOpts...) 
- if err != nil { - return nil, errors.Wrapf(err, "failed to marshal local source") - } - - defVtx, err := def.Head() - if err != nil { - return nil, err - } - - var sourceMap *llb.SourceMap - - eg, ctx2 := errgroup.WithContext(ctx) - var dtDockerfile []byte - var dtDockerignore []byte - var dtDockerignoreDefault []byte - eg.Go(func() error { - res, err := c.Solve(ctx2, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return errors.Wrapf(err, "failed to resolve dockerfile") - } - - ref, err := res.SingleRef() - if err != nil { - return err - } - - dtDockerfile, err = ref.ReadFile(ctx2, client.ReadRequest{ - Filename: filename, - }) - if err != nil { - fallback := false - if path.Base(filename) == defaultDockerfileName { - var err1 error - dtDockerfile, err1 = ref.ReadFile(ctx2, client.ReadRequest{ - Filename: path.Join(path.Dir(filename), strings.ToLower(defaultDockerfileName)), - }) - if err1 == nil { - fallback = true - } - } - if !fallback { - return errors.Wrapf(err, "failed to read dockerfile") - } - } - - sourceMap = llb.NewSourceMap(&src, filename, dtDockerfile) - sourceMap.Definition = def - - dt, err := ref.ReadFile(ctx2, client.ReadRequest{ - Filename: filename + ".dockerignore", - }) - if err == nil { - dtDockerignore = dt - } - return nil - }) - var excludes []string - if !isNotLocalContext { - eg.Go(func() error { - dockerignoreState := buildContext - if dockerignoreState == nil { - st := llb.Local(localNameContext, - llb.SessionID(c.BuildOpts().SessionID), - llb.FollowPaths([]string{dockerignoreFilename}), - llb.SharedKeyHint(localNameContext+"-"+dockerignoreFilename), - dockerfile2llb.WithInternalName("load "+dockerignoreFilename), - llb.Differ(llb.DiffNone, false), - ) - dockerignoreState = &st - } - def, err := dockerignoreState.Marshal(ctx, marshalOpts...) 
- if err != nil { - return err - } - res, err := c.Solve(ctx2, client.SolveRequest{ - Definition: def.ToPB(), - }) - if err != nil { - return err - } - ref, err := res.SingleRef() - if err != nil { - return err - } - dtDockerignoreDefault, err = ref.ReadFile(ctx2, client.ReadRequest{ - Filename: dockerignoreFilename, - }) - if err != nil { - return nil - } - return nil - }) - } - - if err := eg.Wait(); err != nil { - return nil, err - } - - if dtDockerignore == nil { - dtDockerignore = dtDockerignoreDefault - } - if dtDockerignore != nil { - excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dtDockerignore)) - if err != nil { - return nil, errors.Wrap(err, "failed to parse dockerignore") - } - } - if _, ok := opts["cmdline"]; !ok { if cmdline, ok := opts[keySyntaxArg]; ok { p := strings.SplitN(strings.TrimSpace(cmdline), " ", 2) @@ -387,10 +54,10 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { return nil, errors.Wrapf(err, "failed with %s = %s", keySyntaxArg, cmdline) } return res, err - } else if ref, cmdline, loc, ok := parser.DetectSyntax(dtDockerfile); ok { + } else if ref, cmdline, loc, ok := parser.DetectSyntax(src.Data); ok { res, err := forwardGateway(ctx, c, ref, cmdline) if err != nil && len(errdefs.Sources(err)) == 0 { - return nil, wrapSource(err, sourceMap, loc) + return nil, wrapSource(err, src.SourceMap, loc) } return res, err } @@ -400,231 +67,111 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { return nil, capsError } - if res, ok, err := checkSubRequest(ctx, opts); ok { - return res, err - } - - exportMap := len(targetPlatforms) > 1 - - if v := opts[keyMultiPlatformArg]; v != "" { - opts[keyMultiPlatform] = v - } - if v := opts[keyMultiPlatform]; v != "" { - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Errorf("invalid boolean value %s", v) - } - if !b && exportMap { - return nil, errors.Errorf("returning multiple target platforms is not allowed") - } 
- exportMap = b - } - - expPlatforms := &exptypes.Platforms{ - Platforms: make([]exptypes.Platform, len(targetPlatforms)), - } - res := client.NewResult() - - if v, ok := opts[keyHostnameArg]; ok && len(v) > 0 { - opts[keyHostname] = v - } - - epoch, err := parseSourceDateEpoch(opts[keySourceDateEpoch]) - if err != nil { - return nil, err - } - - target := opts[keyTarget] convertOpt := dockerfile2llb.ConvertOpt{ - Target: target, - MetaResolver: c, - BuildArgs: filter(opts, buildArgPrefix), - Labels: filter(opts, labelPrefix), - CacheIDNamespace: opts[keyCacheNSArg], - SessionID: c.BuildOpts().SessionID, - BuildContext: buildContext, - Excludes: excludes, - IgnoreCache: ignoreCache, - TargetPlatform: targetPlatforms[0], - BuildPlatforms: buildPlatforms, - ImageResolveMode: resolveMode, - PrefixPlatform: exportMap, - ExtraHosts: extraHosts, - ShmSize: shmSize, - Ulimit: ulimit, - CgroupParent: opts[keyCgroupParent], - ForceNetMode: defaultNetMode, - LLBCaps: &caps, - SourceMap: sourceMap, - Hostname: opts[keyHostname], - SourceDateEpoch: epoch, + Config: bc.Config, + Client: bc, + SourceMap: src.SourceMap, + MetaResolver: c, Warn: func(msg, url string, detail [][]byte, location *parser.Range) { - c.Warn(ctx, defVtx, msg, warnOpts(sourceMap, location, detail, url)) + src.Warn(ctx, msg, warnOpts(location, detail, url)) }, - ContextByName: contextByNameFunc(c, c.BuildOpts().SessionID), + } + + if res, ok, err := bc.HandleSubrequest(ctx, dockerui.RequestHandler{ + Outline: func(ctx context.Context) (*outline.Outline, error) { + return dockerfile2llb.Dockefile2Outline(ctx, src.Data, convertOpt) + }, + ListTargets: func(ctx context.Context) (*targets.List, error) { + return dockerfile2llb.ListTargets(ctx, src.Data) + }, + }); err != nil { + return nil, err + } else if ok { + return res, nil } defer func() { var el *parser.ErrorLocation if errors.As(err, &el) { - err = wrapSource(err, sourceMap, el.Location) + err = wrapSource(err, src.SourceMap, el.Location) } }() - if 
req, ok := opts[keyRequestID]; ok { - switch req { - case outline.SubrequestsOutlineDefinition.Name: - o, err := dockerfile2llb.Dockefile2Outline(ctx, dtDockerfile, convertOpt) - if err != nil { - return nil, err - } - return o.ToResult() - case targets.SubrequestsTargetsDefinition.Name: - targets, err := dockerfile2llb.ListTargets(ctx, dtDockerfile) - if err != nil { - return nil, err - } - return targets.ToResult() - default: - return nil, errdefs.NewUnsupportedSubrequestError(req) - } - } - var scanner sbom.Scanner - attests, err := attestations.Parse(opts) - if err != nil { - return nil, err - } - if attrs, ok := attests[attestations.KeyTypeSbom]; ok { - src, ok := attrs["generator"] - if !ok { - return nil, errors.Errorf("sbom scanner cannot be empty") - } - ref, err := reference.ParseNormalizedNamed(src) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse sbom scanner %s", src) - } - ref = reference.TagNameOnly(ref) - - scanner, err = sbom.CreateSBOMScanner(ctx, c, ref.String()) + if bc.SBOM != nil { + scanner, err = sbom.CreateSBOMScanner(ctx, c, bc.SBOM.Generator, llb.ResolveImageConfigOpt{ + ResolveMode: opts["image-resolve-mode"], + }) if err != nil { return nil, err } } - scanTargets := make([]*dockerfile2llb.SBOMTargets, len(targetPlatforms)) - eg, ctx2 = errgroup.WithContext(ctx) + scanTargets := sync.Map{} - for i, tp := range targetPlatforms { - func(i int, tp *ocispecs.Platform) { - eg.Go(func() (err error) { - opt := convertOpt - opt.TargetPlatform = tp - if i != 0 { - opt.Warn = nil - } - opt.ContextByName = contextByNameFunc(c, c.BuildOpts().SessionID) - st, img, scanTarget, err := dockerfile2llb.Dockerfile2LLB(ctx2, dtDockerfile, opt) - if err != nil { - return err - } + rb, err := bc.Build(ctx, func(ctx context.Context, platform *ocispecs.Platform, idx int) (client.Reference, *image.Image, error) { + opt := convertOpt + opt.TargetPlatform = platform + if idx != 0 { + opt.Warn = nil + } - def, err := st.Marshal(ctx2) - if err != 
nil { - return errors.Wrapf(err, "failed to marshal LLB definition") - } + st, img, scanTarget, err := dockerfile2llb.Dockerfile2LLB(ctx, src.Data, opt) + if err != nil { + return nil, nil, err + } - config, err := json.Marshal(img) - if err != nil { - return errors.Wrapf(err, "failed to marshal image config") - } + def, err := st.Marshal(ctx) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to marshal LLB definition") + } - var cacheImports []client.CacheOptionsEntry - // new API - if cacheImportsStr := opts[keyCacheImports]; cacheImportsStr != "" { - var cacheImportsUM []controlapi.CacheOptionsEntry - if err := json.Unmarshal([]byte(cacheImportsStr), &cacheImportsUM); err != nil { - return errors.Wrapf(err, "failed to unmarshal %s (%q)", keyCacheImports, cacheImportsStr) - } - for _, um := range cacheImportsUM { - cacheImports = append(cacheImports, client.CacheOptionsEntry{Type: um.Type, Attrs: um.Attrs}) - } - } - // old API - if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" { - cacheFrom := strings.Split(cacheFromStr, ",") - for _, s := range cacheFrom { - im := client.CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{ - "ref": s, - }, - } - // FIXME(AkihiroSuda): skip append if already exists - cacheImports = append(cacheImports, im) - } - } + r, err := c.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + CacheImports: bc.CacheImports, + }) + if err != nil { + return nil, nil, err + } - r, err := c.Solve(ctx2, client.SolveRequest{ - Definition: def.ToPB(), - CacheImports: cacheImports, - }) - if err != nil { - return err - } + ref, err := r.SingleRef() + if err != nil { + return nil, nil, err + } - ref, err := r.SingleRef() - if err != nil { - return err - } + p := platforms.DefaultSpec() + if platform != nil { + p = *platform + } + scanTargets.Store(platforms.Format(platforms.Normalize(p)), scanTarget) - p := platforms.DefaultSpec() - if tp != nil { - p = *tp - } - p = platforms.Normalize(p) - k := 
platforms.Format(p) - - if !exportMap { - res.AddMeta(exptypes.ExporterImageConfigKey, config) - res.SetRef(ref) - - expPlatforms.Platforms[i] = exptypes.Platform{ - ID: k, - Platform: p, - } - } else { - res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) - res.AddRef(k, ref) - expPlatforms.Platforms[i] = exptypes.Platform{ - ID: k, - Platform: p, - } - } - scanTargets[i] = scanTarget - return nil - }) - }(i, tp) - } - - if err := eg.Wait(); err != nil { + return ref, img, nil + }) + if err != nil { return nil, err } if scanner != nil { - for i, p := range expPlatforms.Platforms { - target := scanTargets[i] + if err := rb.EachPlatform(ctx, func(ctx context.Context, id string, p ocispecs.Platform) error { + v, ok := scanTargets.Load(id) + if !ok { + return errors.Errorf("no scan targets for %s", id) + } + target, ok := v.(*dockerfile2llb.SBOMTargets) + if !ok { + return errors.Errorf("invalid scan targets for %T", v) + } var opts []llb.ConstraintsOpt if target.IgnoreCache { opts = append(opts, llb.IgnoreCache) } - att, err := scanner(ctx, p.ID, target.Core, target.Extras, opts...) + att, err := scanner(ctx, id, target.Core, target.Extras, opts...) 
if err != nil { - return nil, err + return err } - attSolve, err := result.ConvertAttestation(&att, func(st llb.State) (client.Reference, error) { + attSolve, err := result.ConvertAttestation(&att, func(st *llb.State) (client.Reference, error) { def, err := st.Marshal(ctx) if err != nil { return nil, err @@ -638,19 +185,16 @@ func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { return r.Ref, nil }) if err != nil { - return nil, err + return err } - res.AddAttestation(p.ID, *attSolve) + rb.AddAttestation(id, *attSolve) + return nil + }); err != nil { + return nil, err } } - dt, err := json.Marshal(expPlatforms) - if err != nil { - return nil, err - } - res.AddMeta(exptypes.ExporterPlatformsKey, dt) - - return res, nil + return rb.Finalize() } func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline string) (*client.Result, error) { @@ -686,173 +230,11 @@ func forwardGateway(ctx context.Context, c client.Client, ref string, cmdline st }) } -func filter(opt map[string]string, key string) map[string]string { - m := map[string]string{} - for k, v := range opt { - if strings.HasPrefix(k, key) { - m[strings.TrimPrefix(k, key)] = v - } - } - return m -} - -func detectGitContext(ref string, keepGit bool) (*llb.State, bool) { - g, err := gitutil.ParseGitRef(ref) - if err != nil { - return nil, false - } - commit := g.Commit - if g.SubDir != "" { - commit += ":" + g.SubDir - } - gitOpts := []llb.GitOption{dockerfile2llb.WithInternalName("load git source " + ref)} - if keepGit { - gitOpts = append(gitOpts, llb.KeepGitDir()) - } - - st := llb.Git(g.Remote, commit, gitOpts...) 
- return &st, true -} - -func isArchive(header []byte) bool { - for _, m := range [][]byte{ - {0x42, 0x5A, 0x68}, // bzip2 - {0x1F, 0x8B, 0x08}, // gzip - {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz - } { - if len(header) < len(m) { - continue - } - if bytes.Equal(m, header[:len(m)]) { - return true - } - } - - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -func parsePlatforms(v string) ([]*ocispecs.Platform, error) { - var pp []*ocispecs.Platform - for _, v := range strings.Split(v, ",") { - p, err := platforms.Parse(v) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse target platform %s", v) - } - p = platforms.Normalize(p) - pp = append(pp, &p) - } - return pp, nil -} - -func parseResolveMode(v string) (llb.ResolveMode, error) { - switch v { - case pb.AttrImageResolveModeDefault, "": - return llb.ResolveModeDefault, nil - case pb.AttrImageResolveModeForcePull: - return llb.ResolveModeForcePull, nil - case pb.AttrImageResolveModePreferLocal: - return llb.ResolveModePreferLocal, nil - default: - return 0, errors.Errorf("invalid image-resolve-mode: %s", v) - } -} - -func parseExtraHosts(v string) ([]llb.HostIP, error) { - if v == "" { - return nil, nil - } - out := make([]llb.HostIP, 0) - csvReader := csv.NewReader(strings.NewReader(v)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return nil, errors.Errorf("invalid key-value pair %s", field) - } - key := strings.ToLower(parts[0]) - val := strings.ToLower(parts[1]) - ip := net.ParseIP(val) - if ip == nil { - return nil, errors.Errorf("failed to parse IP %s", val) - } - out = append(out, llb.HostIP{Host: key, IP: ip}) - } - return out, nil -} - -func parseShmSize(v string) (int64, error) { - if len(v) == 0 { - return 0, nil - } - kb, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return 0, err - } - return kb, nil -} - 
-func parseUlimits(v string) ([]pb.Ulimit, error) { - if v == "" { - return nil, nil - } - out := make([]pb.Ulimit, 0) - csvReader := csv.NewReader(strings.NewReader(v)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - for _, field := range fields { - ulimit, err := units.ParseUlimit(field) - if err != nil { - return nil, err - } - out = append(out, pb.Ulimit{ - Name: ulimit.Name, - Soft: ulimit.Soft, - Hard: ulimit.Hard, - }) - } - return out, nil -} - -func parseNetMode(v string) (pb.NetMode, error) { - if v == "" { - return llb.NetModeSandbox, nil - } - switch v { - case "none": - return llb.NetModeNone, nil - case "host": - return llb.NetModeHost, nil - case "sandbox": - return llb.NetModeSandbox, nil - default: - return 0, errors.Errorf("invalid netmode %s", v) - } -} - -func scopeToSubDir(c *llb.State, dir string) *llb.State { - bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{ - CopyDirContentsOnly: true, - })) - return &bc -} - -func warnOpts(sm *llb.SourceMap, r *parser.Range, detail [][]byte, url string) client.WarnOpts { +func warnOpts(r *parser.Range, detail [][]byte, url string) client.WarnOpts { opts := client.WarnOpts{Level: 1, Detail: detail, URL: url} if r == nil { return opts } - opts.SourceInfo = &pb.SourceInfo{ - Data: sm.Data, - Filename: sm.Filename, - Definition: sm.Definition.ToPB(), - } opts.Range = []*pb.Range{{ Start: pb.Position{ Line: int32(r.Start.Line), @@ -866,238 +248,6 @@ func warnOpts(sm *llb.SourceMap, r *parser.Range, detail [][]byte, url string) c return opts } -func contextByNameFunc(c client.Client, sessionID string) func(context.Context, string, string, *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) { - return func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) { - named, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, nil, errors.Wrapf(err, "invalid context name %s", name) - } 
- name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") - - if p == nil { - pp := platforms.Normalize(platforms.DefaultSpec()) - p = &pp - } - if p != nil { - pname := name + "::" + platforms.Format(platforms.Normalize(*p)) - st, img, err := contextByName(ctx, c, sessionID, name, pname, p, resolveMode) - if err != nil { - return nil, nil, err - } - if st != nil { - return st, img, nil - } - } - return contextByName(ctx, c, sessionID, name, name, p, resolveMode) - } -} - -func contextByName(ctx context.Context, c client.Client, sessionID, name string, pname string, platform *ocispecs.Platform, resolveMode string) (*llb.State, *dockerfile2llb.Image, error) { - opts := c.BuildOpts().Opts - v, ok := opts[contextPrefix+pname] - if !ok { - return nil, nil, nil - } - - vv := strings.SplitN(v, ":", 2) - if len(vv) != 2 { - return nil, nil, errors.Errorf("invalid context specifier %s for %s", v, pname) - } - // allow git@ without protocol for SSH URLs for backwards compatibility - if strings.HasPrefix(vv[0], "git@") { - vv[0] = "git" - } - switch vv[0] { - case "docker-image": - ref := strings.TrimPrefix(vv[1], "//") - if ref == "scratch" { - st := llb.Scratch() - return &st, nil, nil - } - - imgOpt := []llb.ImageOption{ - llb.WithCustomName("[context " + pname + "] " + ref), - } - if platform != nil { - imgOpt = append(imgOpt, llb.Platform(*platform)) - } - - named, err := reference.ParseNormalizedNamed(ref) - if err != nil { - return nil, nil, err - } - - named = reference.TagNameOnly(named) - - _, data, err := c.ResolveImageConfig(ctx, named.String(), llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: resolveMode, - LogName: fmt.Sprintf("[context %s] load metadata for %s", pname, ref), - ResolverType: llb.ResolverTypeRegistry, - }) - if err != nil { - return nil, nil, err - } - - var img dockerfile2llb.Image - if err := json.Unmarshal(data, &img); err != nil { - return nil, nil, err - } - img.Created = nil - - st := llb.Image(ref, imgOpt...) 
- st, err = st.WithImageConfig(data) - if err != nil { - return nil, nil, err - } - return &st, &img, nil - case "git": - st, ok := detectGitContext(v, true) - if !ok { - return nil, nil, errors.Errorf("invalid git context %s", v) - } - return st, nil, nil - case "http", "https": - st, ok := detectGitContext(v, true) - if !ok { - httpst := llb.HTTP(v, llb.WithCustomName("[context "+pname+"] "+v)) - st = &httpst - } - return st, nil, nil - case "oci-layout": - refSpec := strings.TrimPrefix(vv[1], "//") - ref, err := reference.Parse(refSpec) - if err != nil { - return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", refSpec) - } - named, ok := ref.(reference.Named) - if !ok { - return nil, nil, errors.Errorf("oci-layout reference %q has no name", ref.String()) - } - dgstd, ok := named.(reference.Digested) - if !ok { - return nil, nil, errors.Errorf("oci-layout reference %q has no digest", named.String()) - } - - // for the dummy ref primarily used in log messages, we can use the - // original name, since the store key may not be significant - dummyRef, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", name) - } - dummyRef, err = reference.WithDigest(dummyRef, dgstd.Digest()) - if err != nil { - return nil, nil, errors.Wrapf(err, "could not wrap %q with digest", name) - } - - _, data, err := c.ResolveImageConfig(ctx, dummyRef.String(), llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: resolveMode, - LogName: fmt.Sprintf("[context %s] load metadata for %s", pname, dummyRef.String()), - ResolverType: llb.ResolverTypeOCILayout, - Store: llb.ResolveImageConfigOptStore{ - SessionID: sessionID, - StoreID: named.Name(), - }, - }) - if err != nil { - return nil, nil, err - } - - var img dockerfile2llb.Image - if err := json.Unmarshal(data, &img); err != nil { - return nil, nil, errors.Wrap(err, "could not parse oci-layout image config") - } - - 
ociOpt := []llb.OCILayoutOption{ - llb.WithCustomName("[context " + pname + "] OCI load from client"), - llb.OCIStore(c.BuildOpts().SessionID, named.Name()), - } - if platform != nil { - ociOpt = append(ociOpt, llb.Platform(*platform)) - } - st := llb.OCILayout( - dummyRef.String(), - ociOpt..., - ) - st, err = st.WithImageConfig(data) - if err != nil { - return nil, nil, err - } - return &st, &img, nil - case "local": - st := llb.Local(vv[1], - llb.SessionID(c.BuildOpts().SessionID), - llb.FollowPaths([]string{dockerignoreFilename}), - llb.SharedKeyHint("context:"+pname+"-"+dockerignoreFilename), - llb.WithCustomName("[context "+pname+"] load "+dockerignoreFilename), - llb.Differ(llb.DiffNone, false), - ) - def, err := st.Marshal(ctx) - if err != nil { - return nil, nil, err - } - res, err := c.Solve(ctx, client.SolveRequest{ - Evaluate: true, - Definition: def.ToPB(), - }) - if err != nil { - return nil, nil, err - } - ref, err := res.SingleRef() - if err != nil { - return nil, nil, err - } - dt, _ := ref.ReadFile(ctx, client.ReadRequest{ - Filename: dockerignoreFilename, - }) // error ignored - var excludes []string - if len(dt) != 0 { - excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dt)) - if err != nil { - return nil, nil, err - } - } - st = llb.Local(vv[1], - llb.WithCustomName("[context "+pname+"] load from client"), - llb.SessionID(c.BuildOpts().SessionID), - llb.SharedKeyHint("context:"+pname), - llb.ExcludePatterns(excludes), - ) - return &st, nil, nil - case "input": - inputs, err := c.Inputs(ctx) - if err != nil { - return nil, nil, err - } - st, ok := inputs[vv[1]] - if !ok { - return nil, nil, errors.Errorf("invalid input %s for %s", vv[1], pname) - } - md, ok := opts[inputMetadataPrefix+vv[1]] - if ok { - m := make(map[string][]byte) - if err := json.Unmarshal([]byte(md), &m); err != nil { - return nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) - } - var img *dockerfile2llb.Image - if dtic, ok := 
m[exptypes.ExporterImageConfigKey]; ok { - st, err = st.WithImageConfig(dtic) - if err != nil { - return nil, nil, err - } - if err := json.Unmarshal(dtic, &img); err != nil { - return nil, nil, errors.Wrapf(err, "failed to parse image config for %s", pname) - } - } - return &st, img, nil - } - return &st, nil, nil - default: - return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], pname) - } -} - func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error { if sm == nil { return err @@ -1106,6 +256,7 @@ func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error { Info: &pb.SourceInfo{ Data: sm.Data, Filename: sm.Filename, + Language: sm.Language, Definition: sm.Definition.ToPB(), }, Ranges: make([]*pb.Range, 0, len(ranges)), @@ -1124,15 +275,3 @@ func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error { } return errdefs.WithSource(err, s) } - -func parseSourceDateEpoch(v string) (*time.Time, error) { - if v == "" { - return nil, nil - } - sde, err := strconv.ParseInt(v, 10, 64) - if err != nil { - return nil, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH: %s", v) - } - tm := time.Unix(sde, 0).UTC() - return &tm, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go deleted file mode 100644 index 8449530238..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go +++ /dev/null @@ -1,54 +0,0 @@ -package builder - -import ( - "bytes" - "context" - "encoding/json" - - "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/frontend/subrequests" - "github.com/moby/buildkit/frontend/subrequests/outline" - "github.com/moby/buildkit/frontend/subrequests/targets" - "github.com/moby/buildkit/solver/errdefs" -) - -func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Result, bool, error) { - req, ok := 
opts[keyRequestID] - if !ok { - return nil, false, nil - } - switch req { - case subrequests.RequestSubrequestsDescribe: - res, err := describe() - return res, true, err - case outline.RequestSubrequestsOutline, targets.RequestTargets: // handled later - return nil, false, nil - default: - return nil, true, errdefs.NewUnsupportedSubrequestError(req) - } -} - -func describe() (*client.Result, error) { - all := []subrequests.Request{ - outline.SubrequestsOutlineDefinition, - targets.SubrequestsTargetsDefinition, - subrequests.SubrequestsDescribeDefinition, - } - dt, err := json.MarshalIndent(all, "", " ") - if err != nil { - return nil, err - } - - b := bytes.NewBuffer(nil) - if err := subrequests.PrintDescribe(dt, b); err != nil { - return nil, err - } - - res := client.NewResult() - res.Metadata = map[string][]byte{ - "result.json": dt, - "result.txt": b.Bytes(), - "version": []byte(subrequests.SubrequestsDescribeDefinition.Version), - } - return res, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go index 6476267e2d..738ebf7d05 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go @@ -24,12 +24,12 @@ import ( "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/moby/buildkit/frontend/dockerui" "github.com/moby/buildkit/frontend/subrequests/outline" "github.com/moby/buildkit/frontend/subrequests/targets" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/gitutil" "github.com/moby/buildkit/util/suggest" "github.com/moby/buildkit/util/system" @@ 
-41,9 +41,8 @@ import ( ) const ( - emptyImageName = "scratch" - defaultContextLocalName = "context" - historyComment = "buildkit.dockerfile.v0" + emptyImageName = "scratch" + historyComment = "buildkit.dockerfile.v0" sbomScanContext = "BUILDKIT_SBOM_SCAN_CONTEXT" sbomScanStage = "BUILDKIT_SBOM_SCAN_STAGE" @@ -55,34 +54,13 @@ var nonEnvArgs = map[string]struct{}{ } type ConvertOpt struct { - Target string - MetaResolver llb.ImageMetaResolver - BuildArgs map[string]string - Labels map[string]string - SessionID string - BuildContext *llb.State - Excludes []string - // IgnoreCache contains names of the stages that should not use build cache. - // Empty slice means ignore cache for all stages. Nil doesn't disable cache. - IgnoreCache []string - // CacheIDNamespace scopes the IDs for different cache mounts - CacheIDNamespace string - ImageResolveMode llb.ResolveMode - TargetPlatform *ocispecs.Platform - BuildPlatforms []ocispecs.Platform - PrefixPlatform bool - ExtraHosts []llb.HostIP - ShmSize int64 - Ulimit []pb.Ulimit - CgroupParent string - ForceNetMode pb.NetMode - LLBCaps *apicaps.CapSet - ContextLocalName string - SourceMap *llb.SourceMap - Hostname string - SourceDateEpoch *time.Time - Warn func(short, url string, detail [][]byte, location *parser.Range) - ContextByName func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) + dockerui.Config + Client *dockerui.Client + SourceMap *llb.SourceMap + TargetPlatform *ocispecs.Platform + MetaResolver llb.ImageMetaResolver + LLBCaps *apicaps.CapSet + Warn func(short, url string, detail [][]byte, location *parser.Range) } type SBOMTargets struct { @@ -92,7 +70,7 @@ type SBOMTargets struct { IgnoreCache bool } -func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, *SBOMTargets, error) { +func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *image.Image, *SBOMTargets, error) { ds, err := toDispatchState(ctx, dt, 
opt) if err != nil { return nil, nil, nil, err @@ -158,35 +136,36 @@ func ListTargets(ctx context.Context, dt []byte) (*targets.List, error) { } func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchState, error) { - contextByName := opt.ContextByName - opt.ContextByName = func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) { - if !strings.EqualFold(name, "scratch") && !strings.EqualFold(name, "context") { - if contextByName != nil { - if p == nil { - p = opt.TargetPlatform - } - st, img, err := contextByName(ctx, name, resolveMode, p) - if err != nil { - return nil, nil, err - } - return st, img, nil - } - } - return nil, nil, nil - } - if len(dt) == 0 { return nil, errors.Errorf("the Dockerfile cannot be empty") } - if opt.ContextLocalName == "" { - opt.ContextLocalName = defaultContextLocalName + namedContext := func(ctx context.Context, name string, copt dockerui.ContextOpt) (*llb.State, *image.Image, error) { + if opt.Client == nil { + return nil, nil, nil + } + if !strings.EqualFold(name, "scratch") && !strings.EqualFold(name, "context") { + if copt.Platform == nil { + copt.Platform = opt.TargetPlatform + } + st, img, err := opt.Client.NamedContext(ctx, name, copt) + if err != nil { + return nil, nil, err + } + return st, img, nil + } + return nil, nil, nil } if opt.Warn == nil { opt.Warn = func(string, string, [][]byte, *parser.Range) {} } + if opt.Client != nil && opt.LLBCaps == nil { + caps := opt.Client.BuildOpts().LLBCaps + opt.LLBCaps = &caps + } + platformOpt := buildPlatformOpt(&opt) optMetaArgs := getPlatformArgs(platformOpt) @@ -254,9 +233,9 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS deps: make(map[*dispatchState]struct{}), ctxPaths: make(map[string]struct{}), stageName: st.Name, - prefixPlatform: opt.PrefixPlatform, + prefixPlatform: opt.MultiPlatformRequested, outline: outline.clone(), - epoch: opt.SourceDateEpoch, + epoch: 
opt.Epoch, } if v := st.Platform; v != "" { @@ -276,7 +255,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } if st.Name != "" { - s, img, err := opt.ContextByName(ctx, st.Name, opt.ImageResolveMode.String(), ds.platform) + s, img, err := namedContext(ctx, st.Name, dockerui.ContextOpt{Platform: ds.platform, ResolveMode: opt.ImageResolveMode.String()}) if err != nil { return nil, err } @@ -284,7 +263,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS ds.noinit = true ds.state = *s if img != nil { - ds.image = clampTimes(*img, opt.SourceDateEpoch) + ds.image = clampTimes(*img, opt.Epoch) if img.Architecture != "" && img.OS != "" { ds.platform = &ocispecs.Platform{ OS: img.OS, @@ -321,17 +300,8 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } } ds.cmdTotal = total - - if opt.IgnoreCache != nil { - if len(opt.IgnoreCache) == 0 { - ds.ignoreCache = true - } else if st.Name != "" { - for _, n := range opt.IgnoreCache { - if strings.EqualFold(n, st.Name) { - ds.ignoreCache = true - } - } - } + if opt.Client != nil { + ds.ignoreCache = opt.Client.IsNoCache(st.Name) } } @@ -381,6 +351,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } } + baseCtx := ctx eg, ctx := errgroup.WithContext(ctx) for i, d := range allDispatchStates.states { reachable := isReachable(target, d) @@ -389,6 +360,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS if d.stage.BaseName == emptyImageName { d.state = llb.Scratch() d.image = emptyImage(platformOpt.targetPlatform) + d.platform = &platformOpt.targetPlatform continue } func(i int, d *dispatchState) { @@ -410,7 +382,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS d.stage.BaseName = reference.TagNameOnly(ref).String() var isScratch bool - st, img, err := opt.ContextByName(ctx, d.stage.BaseName, opt.ImageResolveMode.String(), 
platform) + st, img, err := namedContext(ctx, d.stage.BaseName, dockerui.ContextOpt{ResolveMode: opt.ImageResolveMode.String(), Platform: platform}) if err != nil { return err } @@ -426,20 +398,28 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } if reachable { prefix := "[" - if opt.PrefixPlatform && platform != nil { + if opt.MultiPlatformRequested && platform != nil { prefix += platforms.Format(*platform) + " " } prefix += "internal]" - dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: opt.ImageResolveMode.String(), - LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), - ResolverType: llb.ResolverTypeRegistry, + mutRef, dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{ + Platform: platform, + ResolveMode: opt.ImageResolveMode.String(), + LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), + ResolverType: llb.ResolverTypeRegistry, + SourcePolicies: nil, }) if err != nil { return suggest.WrapError(errors.Wrap(err, origName), origName, append(allStageNames, commonImageNames()...), true) } - var img Image + + if ref.String() != mutRef { + ref, err = reference.ParseNormalizedNamed(mutRef) + if err != nil { + return errors.Wrapf(err, "failed to parse ref %q", mutRef) + } + } + var img image.Image if err := json.Unmarshal(dt, &img); err != nil { return errors.Wrap(err, "failed to parse image config") } @@ -466,16 +446,6 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS } } } - if !isScratch { - // if image not scratch set original image name as ref - // and actual reference as alias in binfotypes.Source - d.buildInfo.Sources = append(d.buildInfo.Sources, binfotypes.Source{ - Type: binfotypes.SourceTypeDockerImage, - Ref: origName, - Alias: ref.String(), - Pin: dgst.String(), - }) - } d.image = img } if isScratch { @@ 
-485,7 +455,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS dfCmd(d.stage.SourceCode), llb.Platform(*platform), opt.ImageResolveMode, - llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform, nil)), + llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.MultiPlatformRequested, platform, nil)), location(opt.SourceMap, d.stage.Location), ) } @@ -500,6 +470,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS return nil, err } + ctx = baseCtx buildContext := &mutableOutput{} ctxPaths := map[string]struct{}{} @@ -516,11 +487,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS // make sure that PATH is always set if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok { - var pathOS string - if d.platform != nil { - pathOS = d.platform.OS - } - d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(pathOS)) + d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(d.platform.OS)) } // initialize base metadata from image conf @@ -541,14 +508,12 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS return nil, parser.WithLocation(err, d.stage.Location) } } - d.state = d.state.Network(opt.ForceNetMode) - + d.state = d.state.Network(opt.NetworkMode) opt := dispatchOpt{ allDispatchStates: allDispatchStates, metaArgs: optMetaArgs, buildArgValues: opt.BuildArgs, shlex: shlex, - sessionID: opt.SessionID, buildContext: llb.NewState(buildContext), proxyEnv: proxyEnv, cacheIDNamespace: opt.CacheIDNamespace, @@ -556,7 +521,7 @@ func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS targetPlatform: platformOpt.targetPlatform, extraHosts: opt.ExtraHosts, shmSize: opt.ShmSize, - ulimit: opt.Ulimit, + ulimit: opt.Ulimits, cgroupParent: opt.CgroupParent, llbCaps: opt.LLBCaps, sourceMap: opt.SourceMap, @@ -598,21 +563,17 @@ func 
toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchS target.image.Config.Labels[k] = v } - opts := []llb.LocalOption{ - llb.SessionID(opt.SessionID), - llb.ExcludePatterns(opt.Excludes), - llb.SharedKeyHint(opt.ContextLocalName), - WithInternalName("load build context"), - } + opts := []llb.LocalOption{} if includePatterns := normalizeContextPaths(ctxPaths); includePatterns != nil { opts = append(opts, llb.FollowPaths(includePatterns)) } - - bc := llb.Local(opt.ContextLocalName, opts...) - if opt.BuildContext != nil { - bc = *opt.BuildContext + if opt.Client != nil { + bctx, err := opt.Client.MainContext(ctx, opts...) + if err != nil { + return nil, err + } + buildContext.Output = bctx.Output() } - buildContext.Output = bc.Output() defaults := []llb.ConstraintsOpt{ llb.Platform(platformOpt.targetPlatform), @@ -678,7 +639,6 @@ type dispatchOpt struct { metaArgs []instructions.KeyValuePairOptional buildArgValues map[string]string shlex *shell.Lex - sessionID string buildContext llb.State proxyEnv *llb.ProxyEnv cacheIDNamespace string @@ -809,7 +769,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { type dispatchState struct { opt dispatchOpt state llb.State - image Image + image image.Image platform *ocispecs.Platform stage instructions.Stage base *dispatchState @@ -825,7 +785,6 @@ type dispatchState struct { cmdIndex int cmdTotal int prefixPlatform bool - buildInfo binfotypes.BuildInfo outline outlineCapture epoch *time.Time scanStage bool @@ -936,7 +895,8 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE } st := llb.Scratch().Dir(sourcePath).File( llb.Mkfile(f, 0755, []byte(data)), - WithInternalName("preparing inline document"), + dockerui.WithInternalName("preparing inline document"), + llb.Platform(*d.platform), ) mount := llb.AddMount(destPath, st, llb.SourcePath(sourcePath), llb.Readonly) @@ -1040,12 +1000,20 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy 
*llb.ProxyE } func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool, opt *dispatchOpt) error { - d.state = d.state.Dir(c.Path) - wd := c.Path - if !path.IsAbs(c.Path) { - wd = path.Join("/", d.image.Config.WorkingDir, wd) + wd, err := system.NormalizeWorkdir(d.image.Config.WorkingDir, c.Path, d.platform.OS) + if err != nil { + return errors.Wrap(err, "normalizing workdir") } + + // NormalizeWorkdir returns paths with platform specific separators. For Windows + // this will be of the form: \some\path, which is needed later when we pass it to + // HCS. d.image.Config.WorkingDir = wd + + // From this point forward, we can use UNIX style paths. + wd = system.ToSlash(wd, d.platform.OS) + d.state = d.state.Dir(wd) + if commit { withLayer := false if wd != "/" { @@ -1064,6 +1032,7 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo d.state = d.state.File(llb.Mkdir(wd, 0755, mkdirOpt...), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), env)), d.prefixPlatform, &platform, env)), location(opt.sourceMap, c.Location()), + llb.Platform(*d.platform), ) withLayer = true } @@ -1073,11 +1042,11 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo } func dispatchCopy(d *dispatchState, cfg copyConfig) error { - pp, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath) + dest, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath, *d.platform) if err != nil { return err } - dest := path.Join("/", pp) + if cfg.params.DestPath == "." 
|| cfg.params.DestPath == "" || cfg.params.DestPath[len(cfg.params.DestPath)-1] == filepath.Separator { dest += string(filepath.Separator) } @@ -1101,9 +1070,6 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { if !cfg.isAddCommand { return errors.New("checksum can't be specified for COPY") } - if !addChecksumEnabled { - return errors.New("instruction 'ADD --checksum=' requires the labs channel") - } if len(cfg.params.SourcePaths) != 1 { return errors.New("checksum can't be specified for multiple sources") } @@ -1128,9 +1094,6 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { if !cfg.isAddCommand { return errors.New("source can't be a git ref for COPY") } - if !addGitEnabled { - return errors.New("instruction ADD requires the labs channel") - } // TODO: print a warning (not an error) if gitRef.UnencryptedTCP is true commit := gitRef.Commit if gitRef.SubDir != "" { @@ -1181,6 +1144,11 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { a = a.Copy(st, f, dest, opts...) } } else { + src, err = system.NormalizePath("/", src, d.platform.OS, false) + if err != nil { + return errors.Wrap(err, "removing drive letter") + } + opts := append([]llb.CopyOption{&llb.CopyInfo{ Mode: mode, FollowSymlinks: true, @@ -1192,9 +1160,9 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { }}, copyOpt...) if a == nil { - a = llb.Copy(cfg.source, filepath.Join("/", src), dest, opts...) + a = llb.Copy(cfg.source, src, dest, opts...) } else { - a = a.Copy(cfg.source, filepath.Join("/", src), dest, opts...) + a = a.Copy(cfg.source, src, dest, opts...) 
} } } @@ -1203,10 +1171,14 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { commitMessage.WriteString(" <<" + src.Path) data := src.Data - f := src.Path + f, err := system.CheckSystemDriveAndRemoveDriveLetter(src.Path, d.platform.OS) + if err != nil { + return errors.Wrap(err, "removing drive letter") + } st := llb.Scratch().File( - llb.Mkfile(f, 0664, []byte(data)), - WithInternalName("preparing inline document"), + llb.Mkfile(f, 0644, []byte(data)), + dockerui.WithInternalName("preparing inline document"), + llb.Platform(*d.platform), ) opts := append([]llb.CopyOption{&llb.CopyInfo{ @@ -1215,9 +1187,9 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { }}, copyOpt...) if a == nil { - a = llb.Copy(st, f, dest, opts...) + a = llb.Copy(st, system.ToSlash(f, d.platform.OS), dest, opts...) } else { - a = a.Copy(st, f, dest, opts...) + a = a.Copy(st, filepath.ToSlash(f), dest, opts...) } } @@ -1248,7 +1220,9 @@ func dispatchCopy(d *dispatchState, cfg copyConfig) error { d.cmdIndex-- // prefixCommand increases it pgName := prefixCommand(d, name, d.prefixPlatform, &platform, env) - var copyOpts []llb.ConstraintsOpt + copyOpts := []llb.ConstraintsOpt{ + llb.Platform(*d.platform), + } copy(copyOpts, fileOpt) copyOpts = append(copyOpts, llb.ProgressGroup(pgID, pgName, true)) @@ -1307,7 +1281,7 @@ func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error { args = withShell(d.image, args) } d.image.Config.Cmd = args - d.image.Config.ArgsEscaped = true + d.image.Config.ArgsEscaped = true //nolint:staticcheck // ignore SA1019: field is deprecated in OCI Image spec, but used for backward-compatibility with Docker image spec. 
d.cmdSet = true return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil, d.epoch) } @@ -1326,11 +1300,12 @@ func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) err func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error { d.image.Config.Healthcheck = &image.HealthConfig{ - Test: c.Health.Test, - Interval: c.Health.Interval, - Timeout: c.Health.Timeout, - StartPeriod: c.Health.StartPeriod, - Retries: c.Health.Retries, + Test: c.Health.Test, + Interval: c.Health.Interval, + Timeout: c.Health.Timeout, + StartPeriod: c.Health.StartPeriod, + StartInterval: c.Health.StartInterval, + Retries: c.Health.Retries, } return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil, d.epoch) } @@ -1440,15 +1415,24 @@ func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instru return commitToHistory(&d.image, "ARG "+strings.Join(commitStrs, " "), false, nil, d.epoch) } -func pathRelativeToWorkingDir(s llb.State, p string) (string, error) { - if path.IsAbs(p) { - return p, nil - } - dir, err := s.GetDir(context.TODO()) +func pathRelativeToWorkingDir(s llb.State, p string, platform ocispecs.Platform) (string, error) { + dir, err := s.GetDir(context.TODO(), llb.Platform(platform)) if err != nil { return "", err } - return path.Join(dir, p), nil + + if len(p) == 0 { + return dir, nil + } + p, err = system.CheckSystemDriveAndRemoveDriveLetter(p, platform.OS) + if err != nil { + return "", errors.Wrap(err, "removing drive letter") + } + + if system.IsAbs(p, platform.OS) { + return system.NormalizePath("/", p, platform.OS, false) + } + return system.NormalizePath(dir, p, platform.OS, false) } func addEnv(env []string, k, v string) []string { @@ -1514,7 +1498,7 @@ func runCommandString(args []string, buildArgs []instructions.KeyValuePairOption return strings.Join(append(tmpBuildEnv, args...), " ") } -func commitToHistory(img *Image, msg string, withLayer 
bool, st *llb.State, tm *time.Time) error { +func commitToHistory(img *image.Image, msg string, withLayer bool, st *llb.State, tm *time.Time) error { if st != nil { msg += " # buildkit" } @@ -1642,7 +1626,7 @@ type mutableOutput struct { llb.Output } -func withShell(img Image, args []string) []string { +func withShell(img image.Image, args []string) []string { var shell []string if len(img.Config.Shell) > 0 { shell = append([]string{}, img.Config.Shell...) @@ -1652,7 +1636,7 @@ func withShell(img Image, args []string) []string { return append(shell, strings.Join(args, " ")) } -func autoDetectPlatform(img Image, target ocispecs.Platform, supported []ocispecs.Platform) ocispecs.Platform { +func autoDetectPlatform(img image.Image, target ocispecs.Platform, supported []ocispecs.Platform) ocispecs.Platform { os := img.OS arch := img.Architecture if target.OS == os && target.Architecture == arch { @@ -1666,10 +1650,6 @@ func autoDetectPlatform(img Image, target ocispecs.Platform, supported []ocispec return target } -func WithInternalName(name string) llb.ConstraintsOpt { - return llb.WithCustomName("[internal] " + name) -} - func uppercaseCmd(str string) string { p := strings.SplitN(str, " ", 2) p[0] = strings.ToUpper(p[0]) @@ -1794,7 +1774,7 @@ func commonImageNames() []string { return out } -func clampTimes(img Image, tm *time.Time) Image { +func clampTimes(img image.Image, tm *time.Time) image.Image { if tm == nil { return img } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go deleted file mode 100644 index 4506baeb8b..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build dfaddchecksum -// +build dfaddchecksum - -package dockerfile2llb - -const addChecksumEnabled = true diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addgit.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addgit.go deleted file mode 100644 index 9ccb7a20e8..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addgit.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build dfaddgit -// +build dfaddgit - -package dockerfile2llb - -const addGitEnabled = true diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go deleted file mode 100644 index 8de035297c..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build !dfaddchecksum -// +build !dfaddchecksum - -package dockerfile2llb - -const addChecksumEnabled = false diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go deleted file mode 100644 index 119bb32c88..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build !dfaddgit -// +build !dfaddgit - -package dockerfile2llb - -const addGitEnabled = false diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go index 1015590a0d..7485357bab 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go @@ -96,7 +96,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* } if mount.ReadOnly { mountOpts = append(mountOpts, llb.Readonly) - } else if 
mount.Type == instructions.MountTypeBind && opt.llbCaps.Supports(pb.CapExecMountBindReadWriteNoOuput) == nil { + } else if mount.Type == instructions.MountTypeBind && opt.llbCaps.Supports(pb.CapExecMountBindReadWriteNoOutput) == nil { mountOpts = append(mountOpts, llb.ForceNoOutput) } if mount.Type == instructions.MountTypeCache { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go index 5c3bdeec32..70d81262bc 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go @@ -6,11 +6,7 @@ import ( ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) -// Image is the JSON structure which describes some basic information about the image. -// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type Image image.Image - -func clone(src Image) Image { +func clone(src image.Image) image.Image { img := src img.Config = src.Config img.Config.Env = append([]string{}, src.Config.Env...) 
@@ -19,8 +15,8 @@ func clone(src Image) Image { return img } -func emptyImage(platform ocispecs.Platform) Image { - img := Image{} +func emptyImage(platform ocispecs.Platform) image.Image { + img := image.Image{} img.Architecture = platform.Architecture img.OS = platform.OS img.Variant = platform.Variant diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go index a527175b73..66e50d8aad 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go @@ -156,14 +156,7 @@ func (bf *BFlags) Parse() error { return nil } - arg = arg[2:] - value := "" - - index := strings.Index(arg, "=") - if index >= 0 { - value = arg[index+1:] - arg = arg[:index] - } + arg, value, hasValue := strings.Cut(arg[2:], "=") flag, ok := bf.flags[arg] if !ok { @@ -180,27 +173,27 @@ func (bf *BFlags) Parse() error { switch flag.flagType { case boolType: // value == "" is only ok if no "=" was specified - if index >= 0 && value == "" { + if hasValue && value == "" { return errors.Errorf("missing a value on flag: %s", arg) } - lower := strings.ToLower(value) - if lower == "" { + switch strings.ToLower(value) { + case "true", "": flag.Value = "true" - } else if lower == "true" || lower == "false" { - flag.Value = lower - } else { + case "false": + flag.Value = "false" + default: return errors.Errorf("expecting boolean value for flag %s, not: %s", arg, value) } case stringType: - if index < 0 { + if !hasValue { return errors.Errorf("missing a value on flag: %s", arg) } flag.Value = value case stringsType: - if index < 0 { + if !hasValue { return errors.Errorf("missing a value on flag: %s", arg) } flag.StringValues = append(flag.StringValues, value) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go 
b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go index e328b27bc7..34e8fcc91d 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go @@ -11,13 +11,17 @@ import ( "github.com/pkg/errors" ) -const MountTypeBind = "bind" -const MountTypeCache = "cache" -const MountTypeTmpfs = "tmpfs" -const MountTypeSecret = "secret" -const MountTypeSSH = "ssh" +type MountType string -var allowedMountTypes = map[string]struct{}{ +const ( + MountTypeBind MountType = "bind" + MountTypeCache MountType = "cache" + MountTypeTmpfs MountType = "tmpfs" + MountTypeSecret MountType = "secret" + MountTypeSSH MountType = "ssh" +) + +var allowedMountTypes = map[MountType]struct{}{ MountTypeBind: {}, MountTypeCache: {}, MountTypeTmpfs: {}, @@ -25,11 +29,15 @@ var allowedMountTypes = map[string]struct{}{ MountTypeSSH: {}, } -const MountSharingShared = "shared" -const MountSharingPrivate = "private" -const MountSharingLocked = "locked" +type ShareMode string -var allowedSharingTypes = map[string]struct{}{ +const ( + MountSharingShared ShareMode = "shared" + MountSharingPrivate ShareMode = "private" + MountSharingLocked ShareMode = "locked" +) + +var allowedSharingModes = map[ShareMode]struct{}{ MountSharingShared: {}, MountSharingPrivate: {}, MountSharingLocked: {}, @@ -44,31 +52,18 @@ func init() { parseRunPostHooks = append(parseRunPostHooks, runMountPostHook) } -func isValidMountType(s string) bool { - if s == "secret" { - if !isSecretMountsSupported() { - return false - } +func allShareModes() []string { + types := make([]string, 0, len(allowedSharingModes)) + for k := range allowedSharingModes { + types = append(types, string(k)) } - if s == "ssh" { - if !isSSHMountsSupported() { - return false - } - } - _, ok := allowedMountTypes[s] - return ok + return types } func allMountTypes() []string { - types := make([]string, 0, 
len(allowedMountTypes)+2) + types := make([]string, 0, len(allowedMountTypes)) for k := range allowedMountTypes { - types = append(types, k) - } - if isSecretMountsSupported() { - types = append(types, "secret") - } - if isSSHMountsSupported() { - types = append(types, "ssh") + types = append(types, string(k)) } return types } @@ -119,22 +114,22 @@ type mountState struct { } type Mount struct { - Type string + Type MountType From string Source string Target string ReadOnly bool SizeLimit int64 CacheID string - CacheSharing string + CacheSharing ShareMode Required bool Mode *uint64 UID *uint64 GID *uint64 } -func parseMount(value string, expander SingleWordExpander) (*Mount, error) { - csvReader := csv.NewReader(strings.NewReader(value)) +func parseMount(val string, expander SingleWordExpander) (*Mount, error) { + csvReader := csv.NewReader(strings.NewReader(val)) fields, err := csvReader.Read() if err != nil { return nil, errors.Wrap(err, "failed to parse csv mounts") @@ -145,10 +140,10 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { roAuto := true for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) + key, value, ok := strings.Cut(field, "=") + key = strings.ToLower(key) - if len(parts) == 1 { + if !ok { if expander == nil { continue // evaluate later } @@ -162,27 +157,24 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { roAuto = false continue case "required": - if m.Type == "secret" || m.Type == "ssh" { + if m.Type == MountTypeSecret || m.Type == MountTypeSSH { m.Required = true continue } else { return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) } + default: + // any other option requires a value. 
+ return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) } } - if len(parts) != 2 { - return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] // check for potential variable if expander != nil { - processed, err := expander(value) + value, err = expander(value) if err != nil { return nil, err } - value = processed } else if key == "from" { if matched, err := regexp.MatchString(`\$.`, value); err != nil { //nolint return nil, err @@ -196,10 +188,11 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { switch key { case "type": - if !isValidMountType(strings.ToLower(value)) { + v := MountType(strings.ToLower(value)) + if _, ok := allowedMountTypes[v]; !ok { return nil, suggest.WrapError(errors.Errorf("unsupported mount type %q", value), value, allMountTypes(), true) } - m.Type = strings.ToLower(value) + m.Type = v case "from": m.From = value case "source", "src": @@ -220,17 +213,16 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { m.ReadOnly = !rw roAuto = false case "required": - if m.Type == "secret" || m.Type == "ssh" { - v, err := strconv.ParseBool(value) + if m.Type == MountTypeSecret || m.Type == MountTypeSSH { + m.Required, err = strconv.ParseBool(value) if err != nil { return nil, errors.Errorf("invalid value for %s: %s", key, value) } - m.Required = v } else { return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) } case "size": - if m.Type == "tmpfs" { + if m.Type == MountTypeTmpfs { m.SizeLimit, err = units.RAMInBytes(value) if err != nil { return nil, errors.Errorf("invalid value for %s: %s", key, value) @@ -241,10 +233,11 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { case "id": m.CacheID = value case "sharing": - if _, ok := allowedSharingTypes[strings.ToLower(value)]; !ok { - return nil, errors.Errorf("unsupported sharing value %q", value) + v := 
ShareMode(strings.ToLower(value)) + if _, ok := allowedSharingModes[v]; !ok { + return nil, suggest.WrapError(errors.Errorf("unsupported sharing value %q", value), value, allShareModes(), true) } - m.CacheSharing = strings.ToLower(value) + m.CacheSharing = v case "mode": mode, err := strconv.ParseUint(value, 8, 32) if err != nil { @@ -273,16 +266,16 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { fileInfoAllowed := m.Type == MountTypeSecret || m.Type == MountTypeSSH || m.Type == MountTypeCache - if m.Mode != nil && !fileInfoAllowed { - return nil, errors.Errorf("mode not allowed for %q type mounts", m.Type) - } - - if m.UID != nil && !fileInfoAllowed { - return nil, errors.Errorf("uid not allowed for %q type mounts", m.Type) - } - - if m.GID != nil && !fileInfoAllowed { - return nil, errors.Errorf("gid not allowed for %q type mounts", m.Type) + if !fileInfoAllowed { + if m.Mode != nil { + return nil, errors.Errorf("mode not allowed for %q type mounts", m.Type) + } + if m.UID != nil { + return nil, errors.Errorf("uid not allowed for %q type mounts", m.Type) + } + if m.GID != nil { + return nil, errors.Errorf("gid not allowed for %q type mounts", m.Type) + } } if roAuto { @@ -293,10 +286,6 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } } - if m.CacheSharing != "" && m.Type != MountTypeCache { - return nil, errors.Errorf("invalid cache sharing set for %v mount", m.Type) - } - if m.Type == MountTypeSecret { if m.From != "" { return nil, errors.Errorf("secret mount should not have a from") @@ -312,5 +301,9 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } } + if m.CacheSharing != "" && m.Type != MountTypeCache { + return nil, errors.Errorf("invalid cache sharing set for %v mount", m.Type) + } + return m, nil } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go 
b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go index 142c3075b5..0ced44dae6 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go @@ -4,13 +4,15 @@ import ( "github.com/pkg/errors" ) +type NetworkMode = string + const ( - NetworkDefault = "default" - NetworkNone = "none" - NetworkHost = "host" + NetworkDefault NetworkMode = "default" + NetworkNone NetworkMode = "none" + NetworkHost NetworkMode = "host" ) -var allowedNetwork = map[string]struct{}{ +var allowedNetwork = map[NetworkMode]struct{}{ NetworkDefault: {}, NetworkNone: {}, NetworkHost: {}, @@ -51,7 +53,7 @@ func runNetworkPostHook(cmd *RunCommand, req parseRequest) error { return nil } -func GetNetwork(cmd *RunCommand) string { +func GetNetwork(cmd *RunCommand) NetworkMode { return cmd.getExternalValue(networkKey).(*networkState).networkMode } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go deleted file mode 100644 index 2b4140b72a..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go +++ /dev/null @@ -1,5 +0,0 @@ -package instructions - -func isSecretMountsSupported() bool { - return true -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go deleted file mode 100644 index 0e4e5f38c7..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go +++ /dev/null @@ -1,5 +0,0 @@ -package instructions - -func isSSHMountsSupported() bool { - return true -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go 
b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go index 6c362fc6fa..5e03f84243 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go @@ -357,12 +357,13 @@ func parseFrom(req parseRequest) (*Stage, error) { }, nil } -func parseBuildStageName(args []string) (string, error) { - stageName := "" +var validStageName = regexp.MustCompile("^[a-z][a-z0-9-_.]*$") + +func parseBuildStageName(args []string) (stageName string, err error) { switch { case len(args) == 3 && strings.EqualFold(args[1], "as"): stageName = strings.ToLower(args[2]) - if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok { + if !validStageName.MatchString(stageName) { return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", args[2]) } case len(args) != 1: @@ -543,6 +544,7 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { flInterval := req.flags.AddString("interval", "") flTimeout := req.flags.AddString("timeout", "") flStartPeriod := req.flags.AddString("start-period", "") + flStartInterval := req.flags.AddString("start-interval", "") flRetries := req.flags.AddString("retries", "") if err := req.flags.Parse(); err != nil { @@ -583,6 +585,12 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { } healthcheck.StartPeriod = startPeriod + startInterval, err := parseOptInterval(flStartInterval) + if err != nil { + return nil, err + } + healthcheck.StartInterval = startInterval + if flRetries.Value != "" { retries, err := strconv.ParseInt(flRetries.Value, 10, 32) if err != nil { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go index d6723635d4..4a6129fdc8 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go +++ 
b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go @@ -49,8 +49,7 @@ func (node *Node) Location() []Range { // Dump dumps the AST defined by `node` as a list of sexps. // Returns a string suitable for printing. func (node *Node) Dump() string { - str := "" - str += strings.ToLower(node.Value) + str := strings.ToLower(node.Value) if len(node.Flags) > 0 { str += fmt.Sprintf(" %q", node.Flags) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go index bf0887f236..f9aca5d9ef 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go @@ -4,8 +4,8 @@ package shell // EqualEnvKeys compare two strings and returns true if they are equal. -// On Unix this comparison is case sensitive. -// On Windows this comparison is case insensitive. +// On Unix this comparison is case-sensitive. +// On Windows this comparison is case-insensitive. func EqualEnvKeys(from, to string) bool { return from == to } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go index 010569bbaa..7bbed9b207 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_windows.go @@ -3,8 +3,8 @@ package shell import "strings" // EqualEnvKeys compare two strings and returns true if they are equal. -// On Unix this comparison is case sensitive. -// On Windows this comparison is case insensitive. +// On Unix this comparison is case-sensitive. +// On Windows this comparison is case-insensitive. 
func EqualEnvKeys(from, to string) bool { - return strings.ToUpper(from) == strings.ToUpper(to) + return strings.EqualFold(from, to) } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go index b930ab3260..80806f8ba7 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go @@ -335,39 +335,23 @@ func (sw *shellWord) processDollar() (string, error) { } name := sw.processName() ch := sw.scanner.Next() + chs := string(ch) + nullIsUnset := false + switch ch { case '}': // Normal ${xx} case - value, found := sw.getEnv(name) - if !found && sw.skipUnsetEnv { + value, set := sw.getEnv(name) + if !set && sw.skipUnsetEnv { return fmt.Sprintf("${%s}", name), nil } return value, nil - case '?': - word, _, err := sw.processStopOn('}') - if err != nil { - if sw.scanner.Peek() == scanner.EOF { - return "", errors.New("syntax error: missing '}'") - } - return "", err - } - newValue, found := sw.getEnv(name) - if !found { - if sw.skipUnsetEnv { - return fmt.Sprintf("${%s?%s}", name, word), nil - } - message := "is not allowed to be unset" - if word != "" { - message = word - } - return "", errors.Errorf("%s: %s", name, message) - } - return newValue, nil case ':': - // Special ${xx:...} format processing - // Yes it allows for recursive $'s in the ... 
spot - modifier := sw.scanner.Next() - + nullIsUnset = true + ch = sw.scanner.Next() + chs += string(ch) + fallthrough + case '+', '-', '?': word, _, err := sw.processStopOn('}') if err != nil { if sw.scanner.Peek() == scanner.EOF { @@ -378,53 +362,44 @@ func (sw *shellWord) processDollar() (string, error) { // Grab the current value of the variable in question so we // can use it to determine what to do based on the modifier - newValue, found := sw.getEnv(name) - - switch modifier { - case '+': - if newValue != "" { - newValue = word - } - if !found && sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil - } - return newValue, nil + value, set := sw.getEnv(name) + if sw.skipUnsetEnv && !set { + return fmt.Sprintf("${%s%s%s}", name, chs, word), nil + } + switch ch { case '-': - if newValue == "" { - newValue = word + if !set || (nullIsUnset && value == "") { + return word, nil } - if !found && sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil + return value, nil + case '+': + if !set || (nullIsUnset && value == "") { + return "", nil } - - return newValue, nil - + return word, nil case '?': - if !found { - if sw.skipUnsetEnv { - return fmt.Sprintf("${%s:%s%s}", name, string(modifier), word), nil - } + if !set { message := "is not allowed to be unset" if word != "" { message = word } return "", errors.Errorf("%s: %s", name, message) } - if newValue == "" { + if nullIsUnset && value == "" { message := "is not allowed to be empty" if word != "" { message = word } return "", errors.Errorf("%s: %s", name, message) } - return newValue, nil - + return value, nil default: - return "", errors.Errorf("unsupported modifier (%c) in substitution", modifier) + return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) } + default: + return "", errors.Errorf("unsupported modifier (%s) in substitution", chs) } - return "", errors.Errorf("missing ':' in substitution") } func (sw *shellWord) 
processName() string { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go b/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go new file mode 100644 index 0000000000..52ec012243 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go @@ -0,0 +1,138 @@ +package dockerui + +import ( + "encoding/csv" + "net" + "strconv" + "strings" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/docker/go-units" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/solver/pb" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func parsePlatforms(v string) ([]ocispecs.Platform, error) { + var pp []ocispecs.Platform + for _, v := range strings.Split(v, ",") { + p, err := platforms.Parse(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse target platform %s", v) + } + pp = append(pp, platforms.Normalize(p)) + } + return pp, nil +} + +func parseResolveMode(v string) (llb.ResolveMode, error) { + switch v { + case pb.AttrImageResolveModeDefault, "": + return llb.ResolveModeDefault, nil + case pb.AttrImageResolveModeForcePull: + return llb.ResolveModeForcePull, nil + case pb.AttrImageResolveModePreferLocal: + return llb.ResolveModePreferLocal, nil + default: + return 0, errors.Errorf("invalid image-resolve-mode: %s", v) + } +} + +func parseExtraHosts(v string) ([]llb.HostIP, error) { + if v == "" { + return nil, nil + } + out := make([]llb.HostIP, 0) + csvReader := csv.NewReader(strings.NewReader(v)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + for _, field := range fields { + key, val, ok := strings.Cut(strings.ToLower(field), "=") + if !ok { + return nil, errors.Errorf("invalid key-value pair %s", field) + } + ip := net.ParseIP(val) + if ip == nil { + return nil, errors.Errorf("failed to parse IP %s", val) + } + out = append(out, llb.HostIP{Host: key, IP: ip}) + } + return out, nil +} + +func parseShmSize(v string) 
(int64, error) { + if len(v) == 0 { + return 0, nil + } + kb, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, err + } + return kb, nil +} + +func parseUlimits(v string) ([]pb.Ulimit, error) { + if v == "" { + return nil, nil + } + out := make([]pb.Ulimit, 0) + csvReader := csv.NewReader(strings.NewReader(v)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + for _, field := range fields { + ulimit, err := units.ParseUlimit(field) + if err != nil { + return nil, err + } + out = append(out, pb.Ulimit{ + Name: ulimit.Name, + Soft: ulimit.Soft, + Hard: ulimit.Hard, + }) + } + return out, nil +} + +func parseNetMode(v string) (pb.NetMode, error) { + if v == "" { + return llb.NetModeSandbox, nil + } + switch v { + case "none": + return llb.NetModeNone, nil + case "host": + return llb.NetModeHost, nil + case "sandbox": + return llb.NetModeSandbox, nil + default: + return 0, errors.Errorf("invalid netmode %s", v) + } +} + +func parseSourceDateEpoch(v string) (*time.Time, error) { + if v == "" { + return nil, nil + } + sde, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH: %s", v) + } + tm := time.Unix(sde, 0).UTC() + return &tm, nil +} + +func filter(opt map[string]string, key string) map[string]string { + m := map[string]string{} + for k, v := range opt { + if strings.HasPrefix(k, key) { + m[strings.TrimPrefix(k, key)] = v + } + } + return m +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/build.go b/vendor/github.com/moby/buildkit/frontend/dockerui/build.go new file mode 100644 index 0000000000..8fc9bbbff1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/build.go @@ -0,0 +1,114 @@ +package dockerui + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/containerimage/image" + 
"github.com/moby/buildkit/frontend/gateway/client" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +type BuildFunc func(ctx context.Context, platform *ocispecs.Platform, idx int) (client.Reference, *image.Image, error) + +func (bc *Client) Build(ctx context.Context, fn BuildFunc) (*ResultBuilder, error) { + res := client.NewResult() + + targets := make([]*ocispecs.Platform, 0, len(bc.TargetPlatforms)) + for _, p := range bc.TargetPlatforms { + p := p + targets = append(targets, &p) + } + if len(targets) == 0 { + targets = append(targets, nil) + } + expPlatforms := &exptypes.Platforms{ + Platforms: make([]exptypes.Platform, len(targets)), + } + + eg, ctx := errgroup.WithContext(ctx) + + for i, tp := range targets { + i, tp := i, tp + eg.Go(func() error { + ref, img, err := fn(ctx, tp, i) + if err != nil { + return err + } + + config, err := json.Marshal(img) + if err != nil { + return errors.Wrapf(err, "failed to marshal image config") + } + + p := platforms.DefaultSpec() + if tp != nil { + p = *tp + } + + // in certain conditions we allow input platform to be extended from base image + if p.OS == "windows" && img.OS == p.OS { + if p.OSVersion == "" && img.OSVersion != "" { + p.OSVersion = img.OSVersion + } + if p.OSFeatures == nil && len(img.OSFeatures) > 0 { + p.OSFeatures = img.OSFeatures + } + } + + p = platforms.Normalize(p) + k := platforms.Format(p) + + if bc.MultiPlatformRequested { + res.AddRef(k, ref) + res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) + } else { + res.SetRef(ref) + res.AddMeta(exptypes.ExporterImageConfigKey, config) + } + expPlatforms.Platforms[i] = exptypes.Platform{ + ID: k, + Platform: p, + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + return &ResultBuilder{ + Result: res, + expPlatforms: expPlatforms, + }, nil +} + +type ResultBuilder struct { + *client.Result + expPlatforms *exptypes.Platforms 
+} + +func (rb *ResultBuilder) Finalize() (*client.Result, error) { + dt, err := json.Marshal(rb.expPlatforms) + if err != nil { + return nil, err + } + rb.AddMeta(exptypes.ExporterPlatformsKey, dt) + + return rb.Result, nil +} + +func (rb *ResultBuilder) EachPlatform(ctx context.Context, fn func(ctx context.Context, id string, p ocispecs.Platform) error) error { + eg, ctx := errgroup.WithContext(ctx) + for _, p := range rb.expPlatforms.Platforms { + p := p + eg.Go(func() error { + return fn(ctx, p.ID, p.Platform) + }) + } + return eg.Wait() +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/config.go b/vendor/github.com/moby/buildkit/frontend/dockerui/config.go new file mode 100644 index 0000000000..12ec2c6880 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/config.go @@ -0,0 +1,496 @@ +package dockerui + +import ( + "bytes" + "context" + "encoding/json" + "path" + "strconv" + "strings" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution/reference" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/exporter/containerimage/image" + "github.com/moby/buildkit/frontend/attestations" + "github.com/moby/buildkit/frontend/dockerfile/dockerignore" + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/flightcontrol" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + buildArgPrefix = "build-arg:" + labelPrefix = "label:" + + keyTarget = "target" + keyCgroupParent = "cgroup-parent" + keyForceNetwork = "force-network-mode" + keyGlobalAddHosts = "add-hosts" + keyHostname = "hostname" + keyImageResolveMode = "image-resolve-mode" + keyMultiPlatform = "multi-platform" + keyNoCache = "no-cache" + keyShmSize = "shm-size" + keyTargetPlatform = "platform" + 
keyUlimit = "ulimit" + keyCacheFrom = "cache-from" // for registry only. deprecated in favor of keyCacheImports + keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry + + // Don't forget to update frontend documentation if you add + // a new build-arg: frontend/dockerfile/docs/reference.md + keyCacheNSArg = "build-arg:BUILDKIT_CACHE_MOUNT_NS" + keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM" + keyHostnameArg = "build-arg:BUILDKIT_SANDBOX_HOSTNAME" + keyContextKeepGitDirArg = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" + keySourceDateEpoch = "build-arg:SOURCE_DATE_EPOCH" +) + +type Config struct { + BuildArgs map[string]string + CacheIDNamespace string + CgroupParent string + Epoch *time.Time + ExtraHosts []llb.HostIP + Hostname string + ImageResolveMode llb.ResolveMode + Labels map[string]string + NetworkMode pb.NetMode + ShmSize int64 + Target string + Ulimits []pb.Ulimit + + CacheImports []client.CacheOptionsEntry + TargetPlatforms []ocispecs.Platform // nil means default + BuildPlatforms []ocispecs.Platform + MultiPlatformRequested bool + SBOM *SBOM +} + +type Client struct { + Config + client client.Client + ignoreCache []string + bctx *buildContext + g flightcontrol.Group[*buildContext] + bopts client.BuildOpts + + dockerignore []byte +} + +type SBOM struct { + Generator string +} + +type Source struct { + *llb.SourceMap + Warn func(context.Context, string, client.WarnOpts) +} + +type ContextOpt struct { + NoDockerignore bool + LocalOpts []llb.LocalOption + Platform *ocispecs.Platform + ResolveMode string + CaptureDigest *digest.Digest +} + +func validateMinCaps(c client.Client) error { + opts := c.BuildOpts().Opts + caps := c.BuildOpts().LLBCaps + + if err := caps.Supports(pb.CapFileBase); err != nil { + return errors.Wrap(err, "needs BuildKit 0.5 or later") + } + if opts["override-copy-image"] != "" { + return errors.New("support for \"override-copy-image\" was removed in BuildKit 0.11") + } + if v, ok := 
opts["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok { + if b, err := strconv.ParseBool(v); err == nil && b { + return errors.New("support for \"BUILDKIT_DISABLE_FILEOP\" build-arg was removed in BuildKit 0.11") + } + } + return nil +} + +func NewClient(c client.Client) (*Client, error) { + if err := validateMinCaps(c); err != nil { + return nil, err + } + + bc := &Client{ + client: c, + bopts: c.BuildOpts(), // avoid grpc on every call + } + + if err := bc.init(); err != nil { + return nil, err + } + + return bc, nil +} + +func (bc *Client) BuildOpts() client.BuildOpts { + return bc.bopts +} + +func (bc *Client) init() error { + opts := bc.bopts.Opts + + defaultBuildPlatform := platforms.Normalize(platforms.DefaultSpec()) + if workers := bc.bopts.Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 { + defaultBuildPlatform = workers[0].Platforms[0] + } + buildPlatforms := []ocispecs.Platform{defaultBuildPlatform} + targetPlatforms := []ocispecs.Platform{} + if v := opts[keyTargetPlatform]; v != "" { + var err error + targetPlatforms, err = parsePlatforms(v) + if err != nil { + return err + } + } + bc.BuildPlatforms = buildPlatforms + bc.TargetPlatforms = targetPlatforms + + resolveMode, err := parseResolveMode(opts[keyImageResolveMode]) + if err != nil { + return err + } + bc.ImageResolveMode = resolveMode + + extraHosts, err := parseExtraHosts(opts[keyGlobalAddHosts]) + if err != nil { + return errors.Wrap(err, "failed to parse additional hosts") + } + bc.ExtraHosts = extraHosts + + shmSize, err := parseShmSize(opts[keyShmSize]) + if err != nil { + return errors.Wrap(err, "failed to parse shm size") + } + bc.ShmSize = shmSize + + ulimits, err := parseUlimits(opts[keyUlimit]) + if err != nil { + return errors.Wrap(err, "failed to parse ulimit") + } + bc.Ulimits = ulimits + + defaultNetMode, err := parseNetMode(opts[keyForceNetwork]) + if err != nil { + return err + } + bc.NetworkMode = defaultNetMode + + var ignoreCache []string + if v, ok := opts[keyNoCache]; ok { 
+ if v == "" { + ignoreCache = []string{} // means all stages + } else { + ignoreCache = strings.Split(v, ",") + } + } + bc.ignoreCache = ignoreCache + + multiPlatform := len(targetPlatforms) > 1 + if v := opts[keyMultiPlatformArg]; v != "" { + opts[keyMultiPlatform] = v + } + if v := opts[keyMultiPlatform]; v != "" { + b, err := strconv.ParseBool(v) + if err != nil { + return errors.Errorf("invalid boolean value for multi-platform: %s", v) + } + if !b && multiPlatform { + return errors.Errorf("conflicting config: returning multiple target platforms is not allowed") + } + multiPlatform = b + } + bc.MultiPlatformRequested = multiPlatform + + var cacheImports []client.CacheOptionsEntry + // new API + if cacheImportsStr := opts[keyCacheImports]; cacheImportsStr != "" { + var cacheImportsUM []controlapi.CacheOptionsEntry + if err := json.Unmarshal([]byte(cacheImportsStr), &cacheImportsUM); err != nil { + return errors.Wrapf(err, "failed to unmarshal %s (%q)", keyCacheImports, cacheImportsStr) + } + for _, um := range cacheImportsUM { + cacheImports = append(cacheImports, client.CacheOptionsEntry{Type: um.Type, Attrs: um.Attrs}) + } + } + // old API + if cacheFromStr := opts[keyCacheFrom]; cacheFromStr != "" { + cacheFrom := strings.Split(cacheFromStr, ",") + for _, s := range cacheFrom { + im := client.CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{ + "ref": s, + }, + } + // FIXME(AkihiroSuda): skip append if already exists + cacheImports = append(cacheImports, im) + } + } + bc.CacheImports = cacheImports + + epoch, err := parseSourceDateEpoch(opts[keySourceDateEpoch]) + if err != nil { + return err + } + bc.Epoch = epoch + + attests, err := attestations.Parse(opts) + if err != nil { + return err + } + if attrs, ok := attests[attestations.KeyTypeSbom]; ok { + src, ok := attrs["generator"] + if !ok { + return errors.Errorf("sbom scanner cannot be empty") + } + ref, err := reference.ParseNormalizedNamed(src) + if err != nil { + return errors.Wrapf(err, 
"failed to parse sbom scanner %s", src) + } + ref = reference.TagNameOnly(ref) + bc.SBOM = &SBOM{ + Generator: ref.String(), + } + } + + bc.BuildArgs = filter(opts, buildArgPrefix) + bc.Labels = filter(opts, labelPrefix) + bc.CacheIDNamespace = opts[keyCacheNSArg] + bc.CgroupParent = opts[keyCgroupParent] + bc.Target = opts[keyTarget] + + if v, ok := opts[keyHostnameArg]; ok && len(v) > 0 { + opts[keyHostname] = v + } + bc.Hostname = opts[keyHostname] + return nil +} + +func (bc *Client) buildContext(ctx context.Context) (*buildContext, error) { + return bc.g.Do(ctx, "initcontext", func(ctx context.Context) (*buildContext, error) { + if bc.bctx != nil { + return bc.bctx, nil + } + bctx, err := bc.initContext(ctx) + if err == nil { + bc.bctx = bctx + } + return bctx, err + }) +} + +func (bc *Client) ReadEntrypoint(ctx context.Context, lang string, opts ...llb.LocalOption) (*Source, error) { + bctx, err := bc.buildContext(ctx) + if err != nil { + return nil, err + } + + var src *llb.State + + if !bctx.forceLocalDockerfile { + if bctx.dockerfile != nil { + src = bctx.dockerfile + } + } + + if src == nil { + name := "load build definition from " + bctx.filename + + filenames := []string{bctx.filename, bctx.filename + ".dockerignore"} + + // dockerfile is also supported casing moby/moby#10858 + if path.Base(bctx.filename) == DefaultDockerfileName { + filenames = append(filenames, path.Join(path.Dir(bctx.filename), strings.ToLower(DefaultDockerfileName))) + } + + opts = append([]llb.LocalOption{ + llb.FollowPaths(filenames), + llb.SessionID(bc.bopts.SessionID), + llb.SharedKeyHint(bctx.dockerfileLocalName), + WithInternalName(name), + llb.Differ(llb.DiffNone, false), + }, opts...) + + lsrc := llb.Local(bctx.dockerfileLocalName, opts...) + src = &lsrc + } + + def, err := src.Marshal(ctx, bc.marshalOpts()...) 
+ if err != nil { + return nil, errors.Wrapf(err, "failed to marshal local source") + } + + defVtx, err := def.Head() + if err != nil { + return nil, err + } + + res, err := bc.client.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve dockerfile") + } + + ref, err := res.SingleRef() + if err != nil { + return nil, err + } + + dt, err := ref.ReadFile(ctx, client.ReadRequest{ + Filename: bctx.filename, + }) + if err != nil { + if path.Base(bctx.filename) == DefaultDockerfileName { + var err1 error + dt, err1 = ref.ReadFile(ctx, client.ReadRequest{ + Filename: path.Join(path.Dir(bctx.filename), strings.ToLower(DefaultDockerfileName)), + }) + if err1 == nil { + err = nil + } + } + if err != nil { + return nil, errors.Wrapf(err, "failed to read dockerfile") + } + } + smap := llb.NewSourceMap(src, bctx.filename, lang, dt) + smap.Definition = def + + dt, err = ref.ReadFile(ctx, client.ReadRequest{ + Filename: bctx.filename + ".dockerignore", + }) + if err == nil { + bc.dockerignore = dt + } + + return &Source{ + SourceMap: smap, + Warn: func(ctx context.Context, msg string, opts client.WarnOpts) { + if opts.Level == 0 { + opts.Level = 1 + } + if opts.SourceInfo == nil { + opts.SourceInfo = &pb.SourceInfo{ + Data: smap.Data, + Filename: smap.Filename, + Language: smap.Language, + Definition: smap.Definition.ToPB(), + } + } + bc.client.Warn(ctx, defVtx, msg, opts) + }, + }, nil +} + +func (bc *Client) MainContext(ctx context.Context, opts ...llb.LocalOption) (*llb.State, error) { + bctx, err := bc.buildContext(ctx) + if err != nil { + return nil, err + } + + if bctx.context != nil { + return bctx.context, nil + } + + if bc.dockerignore == nil { + st := llb.Local(bctx.contextLocalName, + llb.SessionID(bc.bopts.SessionID), + llb.FollowPaths([]string{DefaultDockerignoreName}), + llb.SharedKeyHint(bctx.contextLocalName+"-"+DefaultDockerignoreName), + WithInternalName("load 
"+DefaultDockerignoreName), + llb.Differ(llb.DiffNone, false), + ) + def, err := st.Marshal(ctx, bc.marshalOpts()...) + if err != nil { + return nil, err + } + res, err := bc.client.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + ref, err := res.SingleRef() + if err != nil { + return nil, err + } + dt, _ := ref.ReadFile(ctx, client.ReadRequest{ // ignore error + Filename: DefaultDockerignoreName, + }) + if dt == nil { + dt = []byte{} + } + bc.dockerignore = dt + } + + var excludes []string + if len(bc.dockerignore) != 0 { + excludes, err = dockerignore.ReadAll(bytes.NewBuffer(bc.dockerignore)) + if err != nil { + return nil, errors.Wrap(err, "failed to parse dockerignore") + } + } + + opts = append([]llb.LocalOption{ + llb.SessionID(bc.bopts.SessionID), + llb.ExcludePatterns(excludes), + llb.SharedKeyHint(bctx.contextLocalName), + WithInternalName("load build context"), + }, opts...) + + st := llb.Local(bctx.contextLocalName, opts...) 
+ + return &st, nil +} + +func (bc *Client) NamedContext(ctx context.Context, name string, opt ContextOpt) (*llb.State, *image.Image, error) { + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, nil, errors.Wrapf(err, "invalid context name %s", name) + } + name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") + + pp := platforms.DefaultSpec() + if opt.Platform != nil { + pp = *opt.Platform + } + pname := name + "::" + platforms.Format(platforms.Normalize(pp)) + st, img, err := bc.namedContext(ctx, name, pname, opt) + if err != nil { + return nil, nil, err + } + if st != nil { + return st, img, nil + } + return bc.namedContext(ctx, name, name, opt) +} + +func (bc *Client) IsNoCache(name string) bool { + if len(bc.ignoreCache) == 0 { + return bc.ignoreCache != nil + } + for _, n := range bc.ignoreCache { + if strings.EqualFold(n, name) { + return true + } + } + return false +} + +func WithInternalName(name string) llb.ConstraintsOpt { + return llb.WithCustomName("[internal] " + name) +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/context.go b/vendor/github.com/moby/buildkit/frontend/dockerui/context.go new file mode 100644 index 0000000000..3173558fd6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/context.go @@ -0,0 +1,194 @@ +package dockerui + +import ( + "archive/tar" + "bytes" + "context" + "path/filepath" + "regexp" + "strconv" + + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/frontend/gateway/client" + gwpb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/util/gitutil" + "github.com/pkg/errors" +) + +const ( + DefaultLocalNameContext = "context" + DefaultLocalNameDockerfile = "dockerfile" + DefaultDockerfileName = "Dockerfile" + DefaultDockerignoreName = ".dockerignore" + EmptyImageName = "scratch" +) + +const ( + keyFilename = "filename" + keyContextSubDir = "contextsubdir" + keyNameContext = "contextkey" + keyNameDockerfile 
= "dockerfilekey" +) + +var httpPrefix = regexp.MustCompile(`^https?://`) + +type buildContext struct { + context *llb.State // set if not local + dockerfile *llb.State // override remoteContext if set + contextLocalName string + dockerfileLocalName string + filename string + forceLocalDockerfile bool +} + +func (bc *Client) marshalOpts() []llb.ConstraintsOpt { + return []llb.ConstraintsOpt{llb.WithCaps(bc.bopts.Caps)} +} + +func (bc *Client) initContext(ctx context.Context) (*buildContext, error) { + opts := bc.bopts.Opts + gwcaps := bc.bopts.Caps + + localNameContext := DefaultLocalNameContext + if v, ok := opts[keyNameContext]; ok { + localNameContext = v + } + + bctx := &buildContext{ + contextLocalName: DefaultLocalNameContext, + dockerfileLocalName: DefaultLocalNameDockerfile, + filename: DefaultDockerfileName, + } + + if v, ok := opts[keyFilename]; ok { + bctx.filename = v + } + + if v, ok := opts[keyNameDockerfile]; ok { + bctx.forceLocalDockerfile = true + bctx.dockerfileLocalName = v + } + + keepGit := false + if v, err := strconv.ParseBool(opts[keyContextKeepGitDirArg]); err == nil { + keepGit = v + } + if st, ok := DetectGitContext(opts[localNameContext], keepGit); ok { + bctx.context = st + bctx.dockerfile = st + } else if st, filename, ok := DetectHTTPContext(opts[localNameContext]); ok { + def, err := st.Marshal(ctx, bc.marshalOpts()...) 
+ if err != nil { + return nil, errors.Wrapf(err, "failed to marshal httpcontext") + } + res, err := bc.client.Solve(ctx, client.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve httpcontext") + } + + ref, err := res.SingleRef() + if err != nil { + return nil, err + } + + dt, err := ref.ReadFile(ctx, client.ReadRequest{ + Filename: filename, + Range: &client.FileRange{ + Length: 1024, + }, + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to read downloaded context") + } + if isArchive(dt) { + bc := llb.Scratch().File(llb.Copy(*st, filepath.Join("/", filename), "/", &llb.CopyInfo{ + AttemptUnpack: true, + })) + bctx.context = &bc + } else { + bctx.filename = filename + bctx.context = st + } + bctx.dockerfile = bctx.context + } else if (&gwcaps).Supports(gwpb.CapFrontendInputs) == nil { + inputs, err := bc.client.Inputs(ctx) + if err != nil { + return nil, errors.Wrapf(err, "failed to get frontend inputs") + } + + if !bctx.forceLocalDockerfile { + inputDockerfile, ok := inputs[bctx.dockerfileLocalName] + if ok { + bctx.dockerfile = &inputDockerfile + } + } + + inputCtx, ok := inputs[DefaultLocalNameContext] + if ok { + bctx.context = &inputCtx + } + } + + if bctx.context != nil { + if sub, ok := opts[keyContextSubDir]; ok { + bctx.context = scopeToSubDir(bctx.context, sub) + } + } + + return bctx, nil +} + +func DetectGitContext(ref string, keepGit bool) (*llb.State, bool) { + g, err := gitutil.ParseGitRef(ref) + if err != nil { + return nil, false + } + commit := g.Commit + if g.SubDir != "" { + commit += ":" + g.SubDir + } + gitOpts := []llb.GitOption{WithInternalName("load git source " + ref)} + if keepGit { + gitOpts = append(gitOpts, llb.KeepGitDir()) + } + + st := llb.Git(g.Remote, commit, gitOpts...) 
+ return &st, true +} + +func DetectHTTPContext(ref string) (*llb.State, string, bool) { + filename := "context" + if httpPrefix.MatchString(ref) { + st := llb.HTTP(ref, llb.Filename(filename), WithInternalName("load remote build context")) + return &st, filename, true + } + return nil, "", false +} + +func isArchive(header []byte) bool { + for _, m := range [][]byte{ + {0x42, 0x5A, 0x68}, // bzip2 + {0x1F, 0x8B, 0x08}, // gzip + {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz + } { + if len(header) < len(m) { + continue + } + if bytes.Equal(m, header[:len(m)]) { + return true + } + } + + r := tar.NewReader(bytes.NewBuffer(header)) + _, err := r.Next() + return err == nil +} + +func scopeToSubDir(c *llb.State, dir string) *llb.State { + bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{ + CopyDirContentsOnly: true, + })) + return &bc +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go b/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go new file mode 100644 index 0000000000..6a441c5082 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/namedcontext.go @@ -0,0 +1,253 @@ +package dockerui + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/containerimage/image" + "github.com/moby/buildkit/frontend/dockerfile/dockerignore" + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/util/imageutil" + "github.com/pkg/errors" +) + +const ( + contextPrefix = "context:" + inputMetadataPrefix = "input-metadata:" + maxContextRecursion = 10 +) + +func (bc *Client) namedContext(ctx context.Context, name string, nameWithPlatform string, opt ContextOpt) (*llb.State, *image.Image, error) { + return bc.namedContextRecursive(ctx, name, nameWithPlatform, opt, 0) +} + +func (bc *Client) 
namedContextRecursive(ctx context.Context, name string, nameWithPlatform string, opt ContextOpt, count int) (*llb.State, *image.Image, error) { + opts := bc.bopts.Opts + v, ok := opts[contextPrefix+nameWithPlatform] + if !ok { + return nil, nil, nil + } + + if count > maxContextRecursion { + return nil, nil, errors.New("context recursion limit exceeded; this may indicate a cycle in the provided source policies: " + v) + } + + vv := strings.SplitN(v, ":", 2) + if len(vv) != 2 { + return nil, nil, errors.Errorf("invalid context specifier %s for %s", v, nameWithPlatform) + } + + // allow git@ without protocol for SSH URLs for backwards compatibility + if strings.HasPrefix(vv[0], "git@") { + vv[0] = "git" + } + switch vv[0] { + case "docker-image": + ref := strings.TrimPrefix(vv[1], "//") + if ref == EmptyImageName { + st := llb.Scratch() + return &st, nil, nil + } + + imgOpt := []llb.ImageOption{ + llb.WithCustomName("[context " + nameWithPlatform + "] " + ref), + } + if opt.Platform != nil { + imgOpt = append(imgOpt, llb.Platform(*opt.Platform)) + } + + named, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, nil, err + } + + named = reference.TagNameOnly(named) + + ref, dgst, data, err := bc.client.ResolveImageConfig(ctx, named.String(), llb.ResolveImageConfigOpt{ + Platform: opt.Platform, + ResolveMode: opt.ResolveMode, + LogName: fmt.Sprintf("[context %s] load metadata for %s", nameWithPlatform, ref), + ResolverType: llb.ResolverTypeRegistry, + }) + if err != nil { + e := &imageutil.ResolveToNonImageError{} + if errors.As(err, &e) { + return bc.namedContextRecursive(ctx, e.Updated, name, opt, count+1) + } + return nil, nil, err + } + + var img image.Image + if err := json.Unmarshal(data, &img); err != nil { + return nil, nil, err + } + img.Created = nil + + st := llb.Image(ref, imgOpt...) 
+ st, err = st.WithImageConfig(data) + if err != nil { + return nil, nil, err + } + if opt.CaptureDigest != nil { + *opt.CaptureDigest = dgst + } + return &st, &img, nil + case "git": + st, ok := DetectGitContext(v, true) + if !ok { + return nil, nil, errors.Errorf("invalid git context %s", v) + } + return st, nil, nil + case "http", "https": + st, ok := DetectGitContext(v, true) + if !ok { + httpst := llb.HTTP(v, llb.WithCustomName("[context "+nameWithPlatform+"] "+v)) + st = &httpst + } + return st, nil, nil + case "oci-layout": + refSpec := strings.TrimPrefix(vv[1], "//") + ref, err := reference.Parse(refSpec) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", refSpec) + } + named, ok := ref.(reference.Named) + if !ok { + return nil, nil, errors.Errorf("oci-layout reference %q has no name", ref.String()) + } + dgstd, ok := named.(reference.Digested) + if !ok { + return nil, nil, errors.Errorf("oci-layout reference %q has no digest", named.String()) + } + + // for the dummy ref primarily used in log messages, we can use the + // original name, since the store key may not be significant + dummyRef, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", name) + } + dummyRef, err = reference.WithDigest(dummyRef, dgstd.Digest()) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not wrap %q with digest", name) + } + + // TODO: How should source policy be handled here with a dummy ref? 
+ _, dgst, data, err := bc.client.ResolveImageConfig(ctx, dummyRef.String(), llb.ResolveImageConfigOpt{ + Platform: opt.Platform, + ResolveMode: opt.ResolveMode, + LogName: fmt.Sprintf("[context %s] load metadata for %s", nameWithPlatform, dummyRef.String()), + ResolverType: llb.ResolverTypeOCILayout, + Store: llb.ResolveImageConfigOptStore{ + SessionID: bc.bopts.SessionID, + StoreID: named.Name(), + }, + }) + if err != nil { + return nil, nil, err + } + + var img image.Image + if err := json.Unmarshal(data, &img); err != nil { + return nil, nil, errors.Wrap(err, "could not parse oci-layout image config") + } + + ociOpt := []llb.OCILayoutOption{ + llb.WithCustomName("[context " + nameWithPlatform + "] OCI load from client"), + llb.OCIStore(bc.bopts.SessionID, named.Name()), + } + if opt.Platform != nil { + ociOpt = append(ociOpt, llb.Platform(*opt.Platform)) + } + st := llb.OCILayout( + dummyRef.String(), + ociOpt..., + ) + st, err = st.WithImageConfig(data) + if err != nil { + return nil, nil, err + } + if opt.CaptureDigest != nil { + *opt.CaptureDigest = dgst + } + return &st, &img, nil + case "local": + st := llb.Local(vv[1], + llb.SessionID(bc.bopts.SessionID), + llb.FollowPaths([]string{DefaultDockerignoreName}), + llb.SharedKeyHint("context:"+nameWithPlatform+"-"+DefaultDockerignoreName), + llb.WithCustomName("[context "+nameWithPlatform+"] load "+DefaultDockerignoreName), + llb.Differ(llb.DiffNone, false), + ) + def, err := st.Marshal(ctx) + if err != nil { + return nil, nil, err + } + res, err := bc.client.Solve(ctx, client.SolveRequest{ + Evaluate: true, + Definition: def.ToPB(), + }) + if err != nil { + return nil, nil, err + } + ref, err := res.SingleRef() + if err != nil { + return nil, nil, err + } + var excludes []string + if !opt.NoDockerignore { + dt, _ := ref.ReadFile(ctx, client.ReadRequest{ + Filename: DefaultDockerignoreName, + }) // error ignored + + if len(dt) != 0 { + excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dt)) + if err != nil { 
+ return nil, nil, err + } + } + } + st = llb.Local(vv[1], + llb.WithCustomName("[context "+nameWithPlatform+"] load from client"), + llb.SessionID(bc.bopts.SessionID), + llb.SharedKeyHint("context:"+nameWithPlatform), + llb.ExcludePatterns(excludes), + ) + return &st, nil, nil + case "input": + inputs, err := bc.client.Inputs(ctx) + if err != nil { + return nil, nil, err + } + st, ok := inputs[vv[1]] + if !ok { + return nil, nil, errors.Errorf("invalid input %s for %s", vv[1], nameWithPlatform) + } + md, ok := opts[inputMetadataPrefix+vv[1]] + if ok { + m := make(map[string][]byte) + if err := json.Unmarshal([]byte(md), &m); err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) + } + var img *image.Image + if dtic, ok := m[exptypes.ExporterImageConfigKey]; ok { + st, err = st.WithImageConfig(dtic) + if err != nil { + return nil, nil, err + } + if err := json.Unmarshal(dtic, &img); err != nil { + return nil, nil, errors.Wrapf(err, "failed to parse image config for %s", nameWithPlatform) + } + } + return &st, img, nil + } + return &st, nil, nil + default: + return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], nameWithPlatform) + } +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/requests.go b/vendor/github.com/moby/buildkit/frontend/dockerui/requests.go new file mode 100644 index 0000000000..7900a0c7a5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerui/requests.go @@ -0,0 +1,91 @@ +package dockerui + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/frontend/subrequests/outline" + "github.com/moby/buildkit/frontend/subrequests/targets" + "github.com/moby/buildkit/solver/errdefs" +) + +const ( + keyRequestID = "requestid" +) + +type RequestHandler struct { + Outline func(context.Context) (*outline.Outline, error) + ListTargets 
func(context.Context) (*targets.List, error) + AllowOther bool +} + +func (bc *Client) HandleSubrequest(ctx context.Context, h RequestHandler) (*client.Result, bool, error) { + req, ok := bc.bopts.Opts[keyRequestID] + if !ok { + return nil, false, nil + } + switch req { + case subrequests.RequestSubrequestsDescribe: + res, err := describe(h) + return res, true, err + case outline.SubrequestsOutlineDefinition.Name: + if f := h.Outline; f != nil { + o, err := f(ctx) + if err != nil { + return nil, false, err + } + if o == nil { + return nil, true, nil + } + res, err := o.ToResult() + return res, true, err + } + case targets.SubrequestsTargetsDefinition.Name: + if f := h.ListTargets; f != nil { + targets, err := f(ctx) + if err != nil { + return nil, false, err + } + if targets == nil { + return nil, true, nil + } + res, err := targets.ToResult() + return res, true, err + } + } + if h.AllowOther { + return nil, false, nil + } + return nil, false, errdefs.NewUnsupportedSubrequestError(req) +} + +func describe(h RequestHandler) (*client.Result, error) { + all := []subrequests.Request{} + if h.Outline != nil { + all = append(all, outline.SubrequestsOutlineDefinition) + } + if h.ListTargets != nil { + all = append(all, targets.SubrequestsTargetsDefinition) + } + all = append(all, subrequests.SubrequestsDescribeDefinition) + dt, err := json.MarshalIndent(all, "", " ") + if err != nil { + return nil, err + } + + b := bytes.NewBuffer(nil) + if err := subrequests.PrintDescribe(dt, b); err != nil { + return nil, err + } + + res := client.NewResult() + res.Metadata = map[string][]byte{ + "result.json": dt, + "result.txt": b.Bytes(), + "version": []byte(subrequests.SubrequestsDescribeDefinition.Version), + } + return res, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/frontend.go b/vendor/github.com/moby/buildkit/frontend/frontend.go index 024ac80204..fb89a8414a 100644 --- a/vendor/github.com/moby/buildkit/frontend/frontend.go +++ 
b/vendor/github.com/moby/buildkit/frontend/frontend.go @@ -22,7 +22,7 @@ type Frontend interface { type FrontendLLBBridge interface { Solve(ctx context.Context, req SolveRequest, sid string) (*Result, error) - ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) + ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) Warn(ctx context.Context, dgst digest.Digest, msg string, opts WarnOpts) error } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go index 7b6b9de132..23585de907 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go @@ -27,7 +27,7 @@ func NewResult() *Result { type Client interface { Solve(ctx context.Context, req SolveRequest) (*Result, error) - ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) + ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) BuildOpts() BuildOpts Inputs(ctx context.Context) (map[string]llb.State, error) NewContainer(ctx context.Context, req NewContainerRequest) (Container, error) @@ -38,6 +38,7 @@ type Client interface { // new container, without defining the initial process. 
type NewContainerRequest struct { Mounts []Mount + Hostname string NetMode pb.NetMode ExtraHosts []*pb.HostIP Platform *pb.Platform @@ -70,6 +71,7 @@ type Container interface { type StartRequest struct { Args []string Env []string + SecretEnv []*pb.SecretEnv User string Cwd string Tty bool diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/container.go b/vendor/github.com/moby/buildkit/frontend/gateway/container/container.go similarity index 87% rename from vendor/github.com/moby/buildkit/frontend/gateway/container.go rename to vendor/github.com/moby/buildkit/frontend/gateway/container/container.go index d6161d1def..af6476e7fc 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/container.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/container/container.go @@ -1,4 +1,4 @@ -package gateway +package container import ( "context" @@ -10,7 +10,9 @@ import ( "sync" "syscall" + "github.com/moby/buildkit/session/secrets" "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/system" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/executor" @@ -18,6 +20,7 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/llbsolver/mounts" + "github.com/moby/buildkit/solver/pb" opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/stack" utilsystem "github.com/moby/buildkit/util/system" @@ -29,6 +32,7 @@ import ( type NewContainerRequest struct { ContainerID string NetMode opspb.NetMode + Hostname string ExtraHosts []executor.HostIP Mounts []Mount Platform *opspb.Platform @@ -56,9 +60,12 @@ func NewContainer(ctx context.Context, w worker.Worker, sm *session.Manager, g s ctr := &gatewayContainer{ id: req.ContainerID, netMode: req.NetMode, + hostname: req.Hostname, extraHosts: req.ExtraHosts, platform: platform, executor: w.Executor(), + sm: sm, + group: g, errGroup: eg, ctx: ctx, cancel: cancel, @@ -86,7 +93,7 @@ func NewContainer(ctx 
context.Context, w worker.Worker, sm *session.Manager, g s cm = refs[m.Input].Worker.CacheManager() } return cm.New(ctx, ref, g) - }) + }, platform.OS) if err != nil { for i := len(p.Actives) - 1; i >= 0; i-- { // call in LIFO order p.Actives[i].Ref.Release(context.TODO()) @@ -136,7 +143,7 @@ type MountMutableRef struct { type MakeMutable func(m *opspb.Mount, ref cache.ImmutableRef) (cache.MutableRef, error) -func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manager, g session.Group, cwd string, mnts []*opspb.Mount, refs []*worker.WorkerRef, makeMutable MakeMutable) (p PreparedMounts, err error) { +func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manager, g session.Group, cwd string, mnts []*opspb.Mount, refs []*worker.WorkerRef, makeMutable MakeMutable, platform string) (p PreparedMounts, err error) { // loop over all mounts, fill in mounts, root and outputs for i, m := range mnts { var ( @@ -255,11 +262,11 @@ func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manage }) root = active } - p.Root = mountWithSession(root, g) + p.Root = MountWithSession(root, g) } else { - mws := mountWithSession(mountable, g) + mws := MountWithSession(mountable, g) dest := m.Dest - if !filepath.IsAbs(filepath.Clean(dest)) { + if !system.IsAbs(filepath.Clean(dest), platform) { dest = filepath.Join("/", cwd, dest) } mws.Dest = dest @@ -280,11 +287,14 @@ func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manage type gatewayContainer struct { id string netMode opspb.NetMode + hostname string extraHosts []executor.HostIP platform opspb.Platform rootFS executor.Mount mounts []executor.Mount executor executor.Executor + sm *session.Manager + group session.Group started bool errGroup *errgroup.Group mu sync.Mutex @@ -304,6 +314,7 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques Cwd: req.Cwd, Tty: req.Tty, NetMode: gwCtr.netMode, + Hostname: gwCtr.hostname, 
ExtraHosts: gwCtr.extraHosts, SecurityMode: req.SecurityMode, RemoveMountStubsRecursive: req.RemoveMountStubsRecursive, @@ -322,6 +333,12 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques procInfo.Meta.Env = addDefaultEnvvar(procInfo.Meta.Env, "TERM", "xterm") } + secretEnv, err := gwCtr.loadSecretEnv(ctx, req.SecretEnv) + if err != nil { + return nil, err + } + procInfo.Meta.Env = append(procInfo.Meta.Env, secretEnv...) + // mark that we have started on the first call to execProcess for this // container, so that future calls will call Exec rather than Run gwCtr.mu.Lock() @@ -341,7 +358,7 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques startedCh := make(chan struct{}) gwProc.errGroup.Go(func() error { bklog.G(gwCtr.ctx).Debugf("Starting new container for %s with args: %q", gwCtr.id, procInfo.Meta.Args) - err := gwCtr.executor.Run(ctx, gwCtr.id, gwCtr.rootFS, gwCtr.mounts, procInfo, startedCh) + _, err := gwCtr.executor.Run(ctx, gwCtr.id, gwCtr.rootFS, gwCtr.mounts, procInfo, startedCh) return stack.Enable(err) }) select { @@ -361,6 +378,33 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques return gwProc, nil } +func (gwCtr *gatewayContainer) loadSecretEnv(ctx context.Context, secretEnv []*pb.SecretEnv) ([]string, error) { + out := make([]string, 0, len(secretEnv)) + for _, sopt := range secretEnv { + id := sopt.ID + if id == "" { + return nil, errors.Errorf("secret ID missing for %q environment variable", sopt.Name) + } + var dt []byte + var err error + err = gwCtr.sm.Any(ctx, gwCtr.group, func(ctx context.Context, _ string, caller session.Caller) error { + dt, err = secrets.GetSecret(ctx, caller, id) + if err != nil { + if errors.Is(err, secrets.ErrNotFound) && sopt.Optional { + return nil + } + return err + } + return nil + }) + if err != nil { + return nil, err + } + out = append(out, fmt.Sprintf("%s=%s", sopt.Name, string(dt))) + } + return out, nil +} + func 
(gwCtr *gatewayContainer) Release(ctx context.Context) error { gwCtr.mu.Lock() defer gwCtr.mu.Unlock() @@ -458,7 +502,7 @@ func addDefaultEnvvar(env []string, k, v string) []string { return append(env, k+"="+v) } -func mountWithSession(m cache.Mountable, g session.Group) executor.Mount { +func MountWithSession(m cache.Mountable, g session.Group) executor.Mount { _, readonly := m.(cache.ImmutableRef) return executor.Mount{ Src: &mountable{m: m, g: g}, diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/util.go b/vendor/github.com/moby/buildkit/frontend/gateway/container/util.go similarity index 96% rename from vendor/github.com/moby/buildkit/frontend/gateway/util.go rename to vendor/github.com/moby/buildkit/frontend/gateway/container/util.go index 0de8353402..1a1fb25138 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/util.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/container/util.go @@ -1,4 +1,4 @@ -package gateway +package container import ( "net" diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go index e13894ba37..0f4da47cbd 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go @@ -7,8 +7,8 @@ import ( cacheutil "github.com/moby/buildkit/cache/util" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/frontend/gateway" "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/gateway/container" gwpb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" @@ -26,8 +26,8 @@ import ( "golang.org/x/sync/errgroup" ) -func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, w worker.Infos, sid 
string, sm *session.Manager) (*bridgeClient, error) { - bc := &bridgeClient{ +func LLBBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, w worker.Infos, sid string, sm *session.Manager) (*BridgeClient, error) { + bc := &BridgeClient{ opts: opts, inputs: inputs, FrontendLLBBridge: llbBridge, @@ -40,7 +40,7 @@ func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLL return bc, nil } -type bridgeClient struct { +type BridgeClient struct { frontend.FrontendLLBBridge mu sync.Mutex opts map[string]string @@ -54,7 +54,7 @@ type bridgeClient struct { ctrs []client.Container } -func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) { +func (c *BridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) { res, err := c.FrontendLLBBridge.Solve(ctx, frontend.SolveRequest{ Evaluate: req.Evaluate, Definition: req.Definition, @@ -91,7 +91,7 @@ func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*cli return cRes, nil } -func (c *bridgeClient) loadBuildOpts() client.BuildOpts { +func (c *BridgeClient) loadBuildOpts() client.BuildOpts { wis := c.workers.WorkerInfos() workers := make([]client.WorkerInfo, len(wis)) for i, w := range wis { @@ -112,11 +112,11 @@ func (c *bridgeClient) loadBuildOpts() client.BuildOpts { } } -func (c *bridgeClient) BuildOpts() client.BuildOpts { +func (c *BridgeClient) BuildOpts() client.BuildOpts { return c.buildOpts } -func (c *bridgeClient) Inputs(ctx context.Context) (map[string]llb.State, error) { +func (c *BridgeClient) Inputs(ctx context.Context) (map[string]llb.State, error) { inputs := make(map[string]llb.State) for key, def := range c.inputs { defop, err := llb.NewDefinitionOp(def) @@ -128,7 +128,7 @@ func (c *bridgeClient) Inputs(ctx context.Context) (map[string]llb.State, error) return inputs, nil } -func (c *bridgeClient) 
wrapSolveError(solveErr error) error { +func (c *BridgeClient) wrapSolveError(solveErr error) error { var ( ee *llberrdefs.ExecError fae *llberrdefs.FileActionError @@ -162,7 +162,7 @@ func (c *bridgeClient) wrapSolveError(solveErr error) error { return errdefs.WithSolveError(solveErr, subject, inputIDs, mountIDs) } -func (c *bridgeClient) registerResultIDs(results ...solver.Result) (ids []string, err error) { +func (c *BridgeClient) registerResultIDs(results ...solver.Result) (ids []string, err error) { c.mu.Lock() defer c.mu.Unlock() @@ -181,7 +181,7 @@ func (c *bridgeClient) registerResultIDs(results ...solver.Result) (ids []string return ids, nil } -func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, error) { +func (c *BridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, error) { if r == nil { return nil, nil } @@ -206,7 +206,7 @@ func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, err return res, nil } -func (c *bridgeClient) discard(err error) { +func (c *BridgeClient) discard(err error) { for _, ctr := range c.ctrs { ctr.Release(context.TODO()) } @@ -227,15 +227,16 @@ func (c *bridgeClient) discard(err error) { } } -func (c *bridgeClient) Warn(ctx context.Context, dgst digest.Digest, msg string, opts client.WarnOpts) error { +func (c *BridgeClient) Warn(ctx context.Context, dgst digest.Digest, msg string, opts client.WarnOpts) error { return c.FrontendLLBBridge.Warn(ctx, dgst, msg, opts) } -func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) { - ctrReq := gateway.NewContainerRequest{ +func (c *BridgeClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) { + ctrReq := container.NewContainerRequest{ ContainerID: identity.NewID(), NetMode: req.NetMode, - Mounts: make([]gateway.Mount, len(req.Mounts)), + Hostname: req.Hostname, + Mounts: make([]container.Mount, len(req.Mounts)), } eg, ctx := 
errgroup.WithContext(ctx) @@ -266,7 +267,7 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return errors.Errorf("failed to find ref %s for %q mount", m.ResultID, m.Dest) } } - ctrReq.Mounts[i] = gateway.Mount{ + ctrReq.Mounts[i] = container.Mount{ WorkerRef: workerRef, Mount: &opspb.Mount{ Dest: m.Dest, @@ -287,7 +288,7 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return nil, err } - ctrReq.ExtraHosts, err = gateway.ParseExtraHosts(req.ExtraHosts) + ctrReq.ExtraHosts, err = container.ParseExtraHosts(req.ExtraHosts) if err != nil { return nil, err } @@ -298,7 +299,7 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer } group := session.NewGroup(c.sid) - ctr, err := gateway.NewContainer(ctx, w, c.sm, group, ctrReq) + ctr, err := container.NewContainer(ctx, w, c.sm, group, ctrReq) if err != nil { return nil, err } @@ -306,7 +307,7 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return ctr, nil } -func (c *bridgeClient) newRef(r solver.ResultProxy, s session.Group) (*ref, error) { +func (c *BridgeClient) newRef(r solver.ResultProxy, s session.Group) (*ref, error) { return &ref{resultProxy: r, session: s, c: c}, nil } @@ -315,7 +316,7 @@ type ref struct { resultProxyClones []solver.ResultProxy session session.Group - c *bridgeClient + c *BridgeClient } func (r *ref) acquireResultProxy() solver.ResultProxy { diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go index 7cd25a0e8e..ae144162c9 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go @@ -23,7 +23,7 @@ type GatewayForwarder struct { } func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs 
map[string]*pb.Definition, sid string, sm *session.Manager) (retRes *frontend.Result, retErr error) { - c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers, sid, sm) + c, err := LLBBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers, sid, sm) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go index 79825d0b65..eb1fb2a2b0 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go @@ -27,8 +27,12 @@ import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/containerimage/image" "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/dockerui" gwclient "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/gateway/container" + "github.com/moby/buildkit/frontend/gateway/forwarder" pb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" @@ -89,7 +93,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten } _, isDevel := opts[keyDevel] - var img ocispecs.Image + var img image.Image var mfstDigest digest.Digest var rootFS cache.MutableRef var readonly bool // TODO: try to switch to read-only by default. 
@@ -137,31 +141,57 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten } } } else { - sourceRef, err := reference.ParseNormalizedNamed(source) + c, err := forwarder.LLBBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers, sid, sm) if err != nil { return nil, err } - - dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), llb.ResolveImageConfigOpt{}) + dc, err := dockerui.NewClient(c) if err != nil { return nil, err } - mfstDigest = dgst - - if err := json.Unmarshal(config, &img); err != nil { + st, dockerImage, err := dc.NamedContext(ctx, source, dockerui.ContextOpt{ + CaptureDigest: &mfstDigest, + }) + if err != nil { return nil, err } - - if dgst != "" { - sourceRef, err = reference.WithDigest(sourceRef, dgst) + if dockerImage != nil { + img = *dockerImage + } + if st == nil { + sourceRef, err := reference.ParseNormalizedNamed(source) if err != nil { return nil, err } + + ref, dgst, config, err := llbBridge.ResolveImageConfig(ctx, reference.TagNameOnly(sourceRef).String(), llb.ResolveImageConfigOpt{}) + if err != nil { + return nil, err + } + + sourceRef, err = reference.ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + + mfstDigest = dgst + + if err := json.Unmarshal(config, &img); err != nil { + return nil, err + } + + if dgst != "" { + sourceRef, err = reference.WithDigest(sourceRef, dgst) + if err != nil { + return nil, err + } + } + + src := llb.Image(sourceRef.String(), &markTypeFrontend{}) + st = &src } - src := llb.Image(sourceRef.String(), &markTypeFrontend{}) - - def, err := src.Marshal(ctx) + def, err := st.Marshal(ctx) if err != nil { return nil, err } @@ -275,8 +305,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten mnts = append(mnts, *mdmnt) } - err = w.Executor().Run(ctx, "", mountWithSession(rootFS, session.NewGroup(sid)), mnts, executor.ProcessInfo{Meta: meta, Stdin: lbf.Stdin, Stdout: lbf.Stdout, Stderr: 
os.Stderr}, nil) - + _, err = w.Executor().Run(ctx, "", container.MountWithSession(rootFS, session.NewGroup(sid)), mnts, executor.ProcessInfo{Meta: meta, Stdin: lbf.Stdin, Stdout: lbf.Stdout, Stderr: os.Stderr}, nil) if err != nil { if errdefs.IsCanceled(ctx, err) && lbf.isErrServerClosed { err = errors.Errorf("frontend grpc server closed unexpectedly") @@ -540,7 +569,7 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R OSFeatures: p.OSFeatures, } } - dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, llb.ResolveImageConfigOpt{ + ref, dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, llb.ResolveImageConfigOpt{ ResolverType: llb.ResolverType(req.ResolverType), Platform: platform, ResolveMode: req.ResolveMode, @@ -549,11 +578,13 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R SessionID: req.SessionID, StoreID: req.StoreID, }, + SourcePolicies: req.SourcePolicies, }) if err != nil { return nil, err } return &pb.ResolveImageConfigResponse{ + Ref: ref, Digest: dgst, Config: dt, }, nil @@ -954,9 +985,10 @@ func (lbf *llbBridgeForwarder) Inputs(ctx context.Context, in *pb.InputsRequest) func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewContainerRequest) (_ *pb.NewContainerResponse, err error) { bklog.G(ctx).Debugf("|<--- NewContainer %s", in.ContainerID) - ctrReq := NewContainerRequest{ + ctrReq := container.NewContainerRequest{ ContainerID: in.ContainerID, NetMode: in.Network, + Hostname: in.Hostname, Platform: in.Platform, Constraints: in.Constraints, } @@ -983,7 +1015,7 @@ func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewConta } } } - ctrReq.Mounts = append(ctrReq.Mounts, Mount{ + ctrReq.Mounts = append(ctrReq.Mounts, container.Mount{ WorkerRef: workerRef, Mount: &opspb.Mount{ Dest: m.Dest, @@ -1006,12 +1038,12 @@ func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewConta return nil, stack.Enable(err) } 
- ctrReq.ExtraHosts, err = ParseExtraHosts(in.ExtraHosts) + ctrReq.ExtraHosts, err = container.ParseExtraHosts(in.ExtraHosts) if err != nil { return nil, stack.Enable(err) } - ctr, err := NewContainer(context.Background(), w, lbf.sm, group, ctrReq) + ctr, err := container.NewContainer(context.Background(), w, lbf.sm, group, ctrReq) if err != nil { return nil, stack.Enable(err) } @@ -1175,7 +1207,21 @@ func (w *outputWriter) Write(msg []byte) (int, error) { return len(msg), stack.Enable(err) } +type execProcessServerThreadSafe struct { + pb.LLBBridge_ExecProcessServer + sendMu sync.Mutex +} + +func (w *execProcessServerThreadSafe) Send(m *pb.ExecMessage) error { + w.sendMu.Lock() + defer w.sendMu.Unlock() + return w.LLBBridge_ExecProcessServer.Send(m) +} + func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) error { + srv = &execProcessServerThreadSafe{ + LLBBridge_ExecProcessServer: srv, + } eg, ctx := errgroup.WithContext(srv.Context()) msgs := make(chan *pb.ExecMessage) @@ -1285,6 +1331,7 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e proc, err := ctr.Start(initCtx, gwclient.StartRequest{ Args: init.Meta.Args, Env: init.Meta.Env, + SecretEnv: init.Secretenv, User: init.Meta.User, Cwd: init.Meta.Cwd, Tty: init.Tty, @@ -1323,7 +1370,7 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e var statusError *rpc.Status if err != nil { statusCode = pb.UnknownExitStatus - st, _ := status.FromError(grpcerrors.ToGRPC(err)) + st, _ := status.FromError(grpcerrors.ToGRPC(ctx, err)) stp := st.Proto() statusError = &rpc.Status{ Code: stp.Code, diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go index 252617ffa0..524b3ba2a9 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go @@ 
-190,7 +190,7 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro } } if retError != nil { - st, _ := status.FromError(grpcerrors.ToGRPC(retError)) + st, _ := status.FromError(grpcerrors.ToGRPC(ctx, retError)) stp := st.Proto() req.Error = &rpc.Status{ Code: stp.Code, @@ -478,7 +478,7 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * return res, nil } -func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) { +func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) { var p *opspb.Platform if platform := opt.Platform; platform != nil { p = &opspb.Platform{ @@ -489,19 +489,27 @@ func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb OSFeatures: platform.OSFeatures, } } + resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{ - ResolverType: int32(opt.ResolverType), - Ref: ref, - Platform: p, - ResolveMode: opt.ResolveMode, - LogName: opt.LogName, - SessionID: opt.Store.SessionID, - StoreID: opt.Store.StoreID, + ResolverType: int32(opt.ResolverType), + Ref: ref, + Platform: p, + ResolveMode: opt.ResolveMode, + LogName: opt.LogName, + SessionID: opt.Store.SessionID, + StoreID: opt.Store.StoreID, + SourcePolicies: opt.SourcePolicies, }) if err != nil { - return "", nil, err + return "", "", nil, err } - return resp.Digest, resp.Config, nil + newRef := resp.Ref + if newRef == "" { + // No ref returned, use the original one. + // This could occur if the version of buildkitd is too old. 
+ newRef = ref + } + return newRef, resp.Digest, resp.Config, nil } func (c *grpcClient) BuildOpts() client.BuildOpts { @@ -792,6 +800,7 @@ func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRe Constraints: req.Constraints, Network: req.NetMode, ExtraHosts: req.ExtraHosts, + Hostname: req.Hostname, }) if err != nil { return nil, err @@ -805,6 +814,7 @@ func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRe return &container{ client: c.client, + caps: c.caps, id: id, execMsgs: c.execMsgs, }, nil @@ -812,6 +822,7 @@ func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRe type container struct { client pb.LLBBridgeClient + caps apicaps.CapSet id string execMsgs *messageForwarder } @@ -820,6 +831,12 @@ func (ctr *container) Start(ctx context.Context, req client.StartRequest) (clien pid := fmt.Sprintf("%s:%s", ctr.id, identity.NewID()) msgs := ctr.execMsgs.Register(pid) + if len(req.SecretEnv) > 0 { + if err := ctr.caps.Supports(pb.CapGatewayExecSecretEnv); err != nil { + return nil, err + } + } + init := &pb.InitMessage{ ContainerID: ctr.id, Meta: &opspb.Meta{ @@ -828,8 +845,9 @@ func (ctr *container) Start(ctx context.Context, req client.StartRequest) (clien Cwd: req.Cwd, User: req.User, }, - Tty: req.Tty, - Security: req.SecurityMode, + Tty: req.Tty, + Security: req.SecurityMode, + Secretenv: req.SecretEnv, } init.Meta.RemoveMountStubsRecursive = req.RemoveMountStubsRecursive if req.Stdin != nil { diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go index deb192dc11..14c6c71ab0 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go @@ -44,6 +44,10 @@ const ( // /etc/hosts for containers created via gateway exec. 
CapGatewayExecExtraHosts apicaps.CapID = "gateway.exec.extrahosts" + // CapGatewayExecExtraHosts is the capability to set secrets as env vars for + // containers created via gateway exec. + CapGatewayExecSecretEnv apicaps.CapID = "gateway.exec.secretenv" + // CapGatewayExecExtraHosts is the capability to send signals to a process // created via gateway exec. CapGatewayExecSignals apicaps.CapID = "gateway.exec.signals" @@ -179,6 +183,13 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapGatewayExecSecretEnv, + Name: "gateway exec secret env", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapGatewayExecSignals, Name: "gateway exec signals", diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go index ec012f615c..d978bfa668 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go @@ -3,7 +3,7 @@ package moby_buildkit_v1_frontend //nolint:revive import ( "fmt" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/moby/buildkit/util/grpcerrors" ) diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go index da36afdd14..4849adeea9 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go @@ -736,16 +736,17 @@ func (m *InputsResponse) GetDefinitions() map[string]*pb.Definition { } type ResolveImageConfigRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` - ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` - LogName string 
`protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` - ResolverType int32 `protobuf:"varint,5,opt,name=ResolverType,proto3" json:"ResolverType,omitempty"` - SessionID string `protobuf:"bytes,6,opt,name=SessionID,proto3" json:"SessionID,omitempty"` - StoreID string `protobuf:"bytes,7,opt,name=StoreID,proto3" json:"StoreID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` + ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` + LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` + ResolverType int32 `protobuf:"varint,5,opt,name=ResolverType,proto3" json:"ResolverType,omitempty"` + SessionID string `protobuf:"bytes,6,opt,name=SessionID,proto3" json:"SessionID,omitempty"` + StoreID string `protobuf:"bytes,7,opt,name=StoreID,proto3" json:"StoreID,omitempty"` + SourcePolicies []*pb1.Policy `protobuf:"bytes,8,rep,name=SourcePolicies,proto3" json:"SourcePolicies,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigRequest{} } @@ -830,9 +831,17 @@ func (m *ResolveImageConfigRequest) GetStoreID() string { return "" } +func (m *ResolveImageConfigRequest) GetSourcePolicies() []*pb1.Policy { + if m != nil { + return m.SourcePolicies + } + return nil +} + type ResolveImageConfigResponse struct { Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=Digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"Digest"` Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` + Ref string `protobuf:"bytes,3,opt,name=Ref,proto3" 
json:"Ref,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -878,6 +887,13 @@ func (m *ResolveImageConfigResponse) GetConfig() []byte { return nil } +func (m *ResolveImageConfigResponse) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + type SolveRequest struct { Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition,proto3" json:"Definition,omitempty"` Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` @@ -1823,6 +1839,7 @@ type NewContainerRequest struct { Platform *pb.Platform `protobuf:"bytes,4,opt,name=platform,proto3" json:"platform,omitempty"` Constraints *pb.WorkerConstraints `protobuf:"bytes,5,opt,name=constraints,proto3" json:"constraints,omitempty"` ExtraHosts []*pb.HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` + Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1903,6 +1920,13 @@ func (m *NewContainerRequest) GetExtraHosts() []*pb.HostIP { return nil } +func (m *NewContainerRequest) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + type NewContainerResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -2196,6 +2220,7 @@ type InitMessage struct { Fds []uint32 `protobuf:"varint,3,rep,packed,name=Fds,proto3" json:"Fds,omitempty"` Tty bool `protobuf:"varint,4,opt,name=Tty,proto3" json:"Tty,omitempty"` Security pb.SecurityMode `protobuf:"varint,5,opt,name=Security,proto3,enum=pb.SecurityMode" json:"Security,omitempty"` + Secretenv []*pb.SecretEnv `protobuf:"bytes,6,rep,name=secretenv,proto3" json:"secretenv,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2269,6 +2294,13 @@ func (m *InitMessage) 
GetSecurity() pb.SecurityMode { return pb.SecurityMode_SANDBOX } +func (m *InitMessage) GetSecretenv() []*pb.SecretEnv { + if m != nil { + return m.Secretenv + } + return nil +} + type ExitMessage struct { Code uint32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` Error *rpc.Status `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` @@ -2627,161 +2659,164 @@ func init() { func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } var fileDescriptor_f1a937782ebbded5 = []byte{ - // 2452 bytes of a gzipped FileDescriptorProto + // 2497 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x59, 0xcf, 0x6f, 0x1b, 0xc7, - 0xf5, 0xd7, 0x8a, 0x14, 0x25, 0x3d, 0x52, 0x14, 0x3d, 0x76, 0xf2, 0xa5, 0x17, 0x81, 0x23, 0xaf, - 0x63, 0x45, 0x56, 0x9c, 0xa5, 0xbf, 0xb2, 0x0d, 0xb9, 0x76, 0xeb, 0xc4, 0xfa, 0x05, 0x29, 0x96, - 0x6c, 0x76, 0xe4, 0xc2, 0x45, 0x90, 0x02, 0x5d, 0x71, 0x87, 0xf4, 0xd6, 0xab, 0xdd, 0xed, 0xee, - 0xd0, 0x32, 0x93, 0x4b, 0x7b, 0x28, 0x50, 0xe4, 0xd4, 0x53, 0x6f, 0x41, 0x81, 0x16, 0xe8, 0xb9, - 0xfd, 0x03, 0xda, 0x73, 0x80, 0x5e, 0x7a, 0xee, 0x21, 0x28, 0xfc, 0x0f, 0xf4, 0x56, 0xa0, 0xb7, - 0xe2, 0xcd, 0xcc, 0x92, 0xc3, 0x1f, 0x5a, 0x92, 0xf5, 0x89, 0x33, 0x6f, 0xde, 0x8f, 0x79, 0xef, - 0xcd, 0x7b, 0xf3, 0x99, 0x25, 0x2c, 0xb5, 0x1c, 0xce, 0xce, 0x9c, 0x8e, 0x1d, 0xc5, 0x21, 0x0f, - 0xc9, 0xe5, 0xd3, 0xf0, 0xa4, 0x63, 0x9f, 0xb4, 0x3d, 0xdf, 0x7d, 0xe9, 0x71, 0xfb, 0xd5, 0xff, - 0xdb, 0xcd, 0x38, 0x0c, 0x38, 0x0b, 0x5c, 0xf3, 0xe3, 0x96, 0xc7, 0x5f, 0xb4, 0x4f, 0xec, 0x46, - 0x78, 0x5a, 0x6b, 0x85, 0xad, 0xb0, 0x26, 0x24, 0x4e, 0xda, 0x4d, 0x31, 0x13, 0x13, 0x31, 0x92, - 0x9a, 0xcc, 0x8d, 0x41, 0xf6, 0x56, 0x18, 0xb6, 0x7c, 0xe6, 0x44, 0x5e, 0xa2, 0x86, 0xb5, 0x38, - 0x6a, 0xd4, 0x12, 0xee, 0xf0, 0x76, 0xa2, 0x64, 0x6e, 0x6a, 0x32, 0xb8, 0x91, 0x5a, 0xba, 0x91, - 0x5a, 0x12, 0xfa, 0xaf, 0x58, 0x5c, 0x8b, 0x4e, 0x6a, 0x61, 0x94, 0x72, 0xd7, 0xce, 0xe5, 
0x76, - 0x22, 0xaf, 0xc6, 0x3b, 0x11, 0x4b, 0x6a, 0x67, 0x61, 0xfc, 0x92, 0xc5, 0x4a, 0xe0, 0xf6, 0xb9, - 0x02, 0x6d, 0xee, 0xf9, 0x28, 0xd5, 0x70, 0xa2, 0x04, 0x8d, 0xe0, 0xaf, 0x12, 0xd2, 0xdd, 0xe6, - 0x61, 0xe0, 0x25, 0xdc, 0xf3, 0x5a, 0x5e, 0xad, 0x99, 0x08, 0x19, 0x69, 0x05, 0x9d, 0x50, 0xec, - 0x77, 0x33, 0x5c, 0x68, 0xc7, 0x0d, 0x16, 0x85, 0xbe, 0xd7, 0xe8, 0xa0, 0x0d, 0x39, 0x92, 0x62, - 0xd6, 0xdf, 0xf2, 0x50, 0xa0, 0x2c, 0x69, 0xfb, 0x9c, 0xac, 0xc2, 0x52, 0xcc, 0x9a, 0x3b, 0x2c, - 0x8a, 0x59, 0xc3, 0xe1, 0xcc, 0xad, 0x1a, 0x2b, 0xc6, 0xda, 0xe2, 0xfe, 0x0c, 0xed, 0x27, 0x93, - 0x1f, 0x41, 0x39, 0x66, 0xcd, 0x44, 0x63, 0x9c, 0x5d, 0x31, 0xd6, 0x8a, 0x1b, 0x1f, 0xd9, 0xe7, - 0xe6, 0xd0, 0xa6, 0xac, 0x79, 0xe4, 0x44, 0x3d, 0x91, 0xfd, 0x19, 0x3a, 0xa0, 0x84, 0x6c, 0x40, - 0x2e, 0x66, 0xcd, 0x6a, 0x4e, 0xe8, 0xba, 0x92, 0xad, 0x6b, 0x7f, 0x86, 0x22, 0x33, 0xd9, 0x84, - 0x3c, 0x6a, 0xa9, 0xe6, 0x85, 0xd0, 0xd5, 0xb1, 0x1b, 0xd8, 0x9f, 0xa1, 0x42, 0x80, 0x3c, 0x86, - 0x85, 0x53, 0xc6, 0x1d, 0xd7, 0xe1, 0x4e, 0x15, 0x56, 0x72, 0x6b, 0xc5, 0x8d, 0x5a, 0xa6, 0x30, - 0x06, 0xc8, 0x3e, 0x52, 0x12, 0xbb, 0x01, 0x8f, 0x3b, 0xb4, 0xab, 0x80, 0x3c, 0x87, 0x92, 0xc3, - 0x39, 0xc3, 0x64, 0x78, 0x61, 0x90, 0x54, 0x4b, 0x42, 0xe1, 0xed, 0xf1, 0x0a, 0x1f, 0x69, 0x52, - 0x52, 0x69, 0x9f, 0x22, 0xf3, 0x01, 0x2c, 0xf5, 0xd9, 0x24, 0x15, 0xc8, 0xbd, 0x64, 0x1d, 0x99, - 0x18, 0x8a, 0x43, 0x72, 0x09, 0xe6, 0x5e, 0x39, 0x7e, 0x9b, 0x89, 0x1c, 0x94, 0xa8, 0x9c, 0xdc, - 0x9f, 0xbd, 0x67, 0x98, 0x2f, 0xe0, 0xc2, 0x90, 0xfe, 0x11, 0x0a, 0x7e, 0xa0, 0x2b, 0x28, 0x6e, - 0x7c, 0x98, 0xb1, 0x6b, 0x5d, 0x9d, 0x66, 0x69, 0x6b, 0x01, 0x0a, 0xb1, 0x70, 0xc8, 0xfa, 0xad, - 0x01, 0x95, 0xc1, 0x54, 0x93, 0x03, 0x95, 0x24, 0x43, 0x84, 0xe5, 0xee, 0x14, 0xa7, 0x04, 0x09, - 0x2a, 0x30, 0x42, 0x85, 0xb9, 0x09, 0x8b, 0x5d, 0xd2, 0xb8, 0x60, 0x2c, 0x6a, 0x5b, 0xb4, 0x36, - 0x21, 0x47, 0x59, 0x93, 0x94, 0x61, 0xd6, 0x53, 0xe7, 0x9a, 0xce, 0x7a, 0x2e, 0x59, 0x81, 0x9c, - 0xcb, 0x9a, 0xca, 0xf5, 0xb2, 
0x1d, 0x9d, 0xd8, 0x3b, 0xac, 0xe9, 0x05, 0x1e, 0xba, 0x48, 0x71, - 0xc9, 0xfa, 0xbd, 0x81, 0xf5, 0x81, 0xdb, 0x22, 0x9f, 0xf4, 0xf9, 0x31, 0xfe, 0xb4, 0x0f, 0xed, - 0xfe, 0x79, 0xf6, 0xee, 0xef, 0xf4, 0x67, 0x62, 0x4c, 0x09, 0xe8, 0xde, 0xfd, 0x18, 0x4a, 0x7a, - 0x6e, 0xc8, 0x3e, 0x14, 0xb5, 0x73, 0xa4, 0x36, 0xbc, 0x3a, 0x59, 0x66, 0xa9, 0x2e, 0x6a, 0xfd, - 0x31, 0x07, 0x45, 0x6d, 0x91, 0x3c, 0x84, 0xfc, 0x4b, 0x2f, 0x90, 0x21, 0x2c, 0x6f, 0xac, 0x4f, - 0xa6, 0xf2, 0xb1, 0x17, 0xb8, 0x54, 0xc8, 0x91, 0xba, 0x56, 0x77, 0xb3, 0x62, 0x5b, 0x77, 0x26, - 0xd3, 0x71, 0x6e, 0xf1, 0xdd, 0x9a, 0xa2, 0x6d, 0xc8, 0xa6, 0x41, 0x20, 0x1f, 0x39, 0xfc, 0x85, - 0x68, 0x1a, 0x8b, 0x54, 0x8c, 0xc9, 0x2d, 0xb8, 0xe8, 0x05, 0xcf, 0x42, 0x1e, 0xd6, 0x63, 0xe6, - 0x7a, 0x78, 0xf8, 0x9e, 0x75, 0x22, 0x56, 0x9d, 0x13, 0x2c, 0xa3, 0x96, 0x48, 0x1d, 0xca, 0x92, - 0x7c, 0xdc, 0x3e, 0xf9, 0x19, 0x6b, 0xf0, 0xa4, 0x5a, 0x10, 0xfe, 0xac, 0x65, 0x6c, 0xe1, 0x40, - 0x17, 0xa0, 0x03, 0xf2, 0x6f, 0x55, 0xed, 0xd6, 0x9f, 0x0d, 0x58, 0xea, 0x53, 0x4f, 0x3e, 0xed, - 0x4b, 0xd5, 0xcd, 0x49, 0xb7, 0xa5, 0x25, 0xeb, 0x33, 0x28, 0xb8, 0x5e, 0x8b, 0x25, 0x5c, 0xa4, - 0x6a, 0x71, 0x6b, 0xe3, 0xdb, 0xef, 0xde, 0x9f, 0xf9, 0xc7, 0x77, 0xef, 0xaf, 0x6b, 0x57, 0x4d, - 0x18, 0xb1, 0xa0, 0x11, 0x06, 0xdc, 0xf1, 0x02, 0x16, 0xe3, 0x05, 0xfb, 0xb1, 0x14, 0xb1, 0x77, - 0xc4, 0x0f, 0x55, 0x1a, 0x30, 0xe8, 0x81, 0x73, 0xca, 0x44, 0x9e, 0x16, 0xa9, 0x18, 0x5b, 0x1c, - 0x96, 0x28, 0xe3, 0xed, 0x38, 0xa0, 0xec, 0xe7, 0x6d, 0x64, 0xfa, 0x5e, 0xda, 0x48, 0xc4, 0xa6, - 0xc7, 0x35, 0x74, 0x64, 0xa4, 0x4a, 0x80, 0xac, 0xc1, 0x1c, 0x8b, 0xe3, 0x30, 0x56, 0xc5, 0x43, - 0x6c, 0x79, 0xd5, 0xdb, 0x71, 0xd4, 0xb0, 0x8f, 0xc5, 0x55, 0x4f, 0x25, 0x83, 0x55, 0x81, 0x72, - 0x6a, 0x35, 0x89, 0xc2, 0x20, 0x61, 0xd6, 0x32, 0x86, 0x2e, 0x6a, 0xf3, 0x44, 0xed, 0xc3, 0xfa, - 0xab, 0x01, 0xe5, 0x94, 0x22, 0x79, 0xc8, 0x17, 0x50, 0xec, 0xb5, 0x86, 0xb4, 0x07, 0xdc, 0xcf, - 0x0c, 0xaa, 0x2e, 0xaf, 0xf5, 0x15, 0xd5, 0x12, 0x74, 0x75, 0xe6, 
0x13, 0xa8, 0x0c, 0x32, 0x8c, - 0xc8, 0xfe, 0x07, 0xfd, 0x0d, 0x62, 0xb0, 0x5f, 0x69, 0xa7, 0xe1, 0x5f, 0x06, 0x5c, 0xa6, 0x4c, - 0x60, 0x97, 0x83, 0x53, 0xa7, 0xc5, 0xb6, 0xc3, 0xa0, 0xe9, 0xb5, 0xd2, 0x30, 0x57, 0x44, 0x33, - 0x4c, 0x35, 0x63, 0x5f, 0x5c, 0x83, 0x85, 0xba, 0xef, 0xf0, 0x66, 0x18, 0x9f, 0x2a, 0xe5, 0x25, - 0x54, 0x9e, 0xd2, 0x68, 0x77, 0x95, 0xac, 0x40, 0x51, 0x29, 0x3e, 0x0a, 0xdd, 0x34, 0x9d, 0x3a, - 0x89, 0x54, 0x61, 0xfe, 0x30, 0x6c, 0x3d, 0xc1, 0x64, 0xcb, 0x0a, 0x4b, 0xa7, 0xc4, 0x82, 0x92, - 0x62, 0x8c, 0xbb, 0xd5, 0x35, 0x47, 0xfb, 0x68, 0xe4, 0x3d, 0x58, 0x3c, 0x66, 0x49, 0xe2, 0x85, - 0xc1, 0xc1, 0x4e, 0xb5, 0x20, 0xe4, 0x7b, 0x04, 0xd4, 0x7d, 0xcc, 0xc3, 0x98, 0x1d, 0xec, 0x54, - 0xe7, 0xa5, 0x6e, 0x35, 0xb5, 0x7e, 0x61, 0x80, 0x39, 0xca, 0x63, 0x95, 0xbe, 0xcf, 0xa0, 0x20, - 0x0f, 0xa4, 0xf4, 0xfa, 0x7f, 0x3b, 0xca, 0xf2, 0x97, 0xbc, 0x0b, 0x05, 0xa9, 0x5d, 0x55, 0xa1, - 0x9a, 0x59, 0xbf, 0x2a, 0x40, 0xe9, 0x18, 0x37, 0x90, 0xc6, 0xd9, 0x06, 0xe8, 0xa5, 0x47, 0x1d, - 0xe9, 0xc1, 0xa4, 0x69, 0x1c, 0xc4, 0x84, 0x85, 0x3d, 0x75, 0x7c, 0xd4, 0x0d, 0xd6, 0x9d, 0x93, - 0xcf, 0xa1, 0x98, 0x8e, 0x9f, 0x46, 0xbc, 0x9a, 0x13, 0xe7, 0xef, 0x5e, 0xc6, 0xf9, 0xd3, 0x77, - 0x62, 0x6b, 0xa2, 0xea, 0xf4, 0x69, 0x14, 0x72, 0x13, 0x2e, 0x38, 0xbe, 0x1f, 0x9e, 0xa9, 0x92, - 0x12, 0xc5, 0x21, 0x92, 0xb3, 0x40, 0x87, 0x17, 0xb0, 0x55, 0x6a, 0xc4, 0x47, 0x71, 0xec, 0x74, - 0xf0, 0x34, 0x15, 0x04, 0xff, 0xa8, 0x25, 0xec, 0x5a, 0x7b, 0x5e, 0xe0, 0xf8, 0x55, 0x10, 0x3c, - 0x72, 0x82, 0xa7, 0x61, 0xf7, 0x75, 0x14, 0xc6, 0x9c, 0xc5, 0x8f, 0x38, 0x8f, 0xab, 0x45, 0x11, - 0xcc, 0x3e, 0x1a, 0xa9, 0x43, 0x69, 0xdb, 0x69, 0xbc, 0x60, 0x07, 0xa7, 0x48, 0x4c, 0x91, 0x55, - 0x56, 0x2f, 0x13, 0xec, 0x4f, 0x23, 0x1d, 0x52, 0xe9, 0x1a, 0x48, 0x03, 0xca, 0xa9, 0xeb, 0xb2, - 0x42, 0xab, 0x4b, 0x42, 0xe7, 0x83, 0x69, 0x43, 0x29, 0xa5, 0xa5, 0x89, 0x01, 0x95, 0x98, 0xc8, - 0x5d, 0x2c, 0x46, 0x87, 0xb3, 0x6a, 0x59, 0xf8, 0xdc, 0x9d, 0x93, 0x23, 0x28, 0x1f, 0x0b, 0x40, - 0x5e, 
0x47, 0x18, 0xee, 0xb1, 0xa4, 0xba, 0x2c, 0x36, 0x70, 0x7d, 0x78, 0x03, 0x3a, 0x70, 0xb7, - 0x05, 0x7b, 0x87, 0x0e, 0x08, 0x9b, 0x0f, 0xa1, 0x32, 0x98, 0xdc, 0x69, 0x80, 0x91, 0xf9, 0x43, - 0xb8, 0x38, 0xc2, 0xa3, 0xb7, 0x6a, 0x3e, 0x7f, 0x32, 0xe0, 0xc2, 0x50, 0x1a, 0xf0, 0x02, 0x10, - 0x45, 0x2f, 0x55, 0x8a, 0x31, 0x39, 0x82, 0x39, 0x4c, 0x73, 0xa2, 0xa0, 0xc0, 0xe6, 0x34, 0x79, - 0xb5, 0x85, 0xa4, 0x8c, 0xbf, 0xd4, 0x62, 0xde, 0x03, 0xe8, 0x11, 0xa7, 0x82, 0x87, 0x5f, 0xc0, - 0x92, 0x4a, 0xb2, 0xea, 0x17, 0x15, 0x89, 0x2a, 0x94, 0x30, 0xa2, 0x86, 0xde, 0xdd, 0x94, 0x9b, - 0xf2, 0x6e, 0xb2, 0xbe, 0x82, 0x65, 0xca, 0x1c, 0x77, 0xcf, 0xf3, 0xd9, 0xf9, 0x2d, 0x18, 0x8b, - 0xdf, 0xf3, 0x59, 0x1d, 0x91, 0x49, 0x5a, 0xfc, 0x6a, 0x4e, 0xee, 0xc3, 0x1c, 0x75, 0x82, 0x16, - 0x53, 0xa6, 0x3f, 0xc8, 0x30, 0x2d, 0x8c, 0x20, 0x2f, 0x95, 0x22, 0xd6, 0x03, 0x58, 0xec, 0xd2, - 0xb0, 0x75, 0x3d, 0x6d, 0x36, 0x13, 0x26, 0xdb, 0x60, 0x8e, 0xaa, 0x19, 0xd2, 0x0f, 0x59, 0xd0, - 0x52, 0xa6, 0x73, 0x54, 0xcd, 0xac, 0x55, 0x84, 0xf3, 0xe9, 0xce, 0x55, 0x68, 0x08, 0xe4, 0x77, - 0x10, 0xbe, 0x19, 0xa2, 0x5e, 0xc5, 0xd8, 0x72, 0xf1, 0x4e, 0x75, 0xdc, 0x1d, 0x2f, 0x3e, 0xdf, - 0xc1, 0x2a, 0xcc, 0xef, 0x78, 0xb1, 0xe6, 0x5f, 0x3a, 0x25, 0xab, 0x78, 0xdb, 0x36, 0xfc, 0xb6, - 0x8b, 0xde, 0x72, 0x16, 0x07, 0xea, 0x5a, 0x19, 0xa0, 0x5a, 0x9f, 0xc8, 0x38, 0x0a, 0x2b, 0x6a, - 0x33, 0x37, 0x61, 0x9e, 0x05, 0x3c, 0xc6, 0x32, 0x92, 0x57, 0x32, 0xb1, 0xe5, 0x03, 0xd9, 0x16, - 0x0f, 0x64, 0x71, 0xf5, 0xd3, 0x94, 0xc5, 0xda, 0x84, 0x65, 0x24, 0x64, 0x27, 0x82, 0x40, 0x5e, - 0xdb, 0xa4, 0x18, 0x5b, 0xf7, 0xa1, 0xd2, 0x13, 0x54, 0xa6, 0x57, 0x21, 0x8f, 0xd8, 0x54, 0xf5, - 0xf5, 0x51, 0x76, 0xc5, 0xba, 0x75, 0x0d, 0x96, 0xd3, 0xe2, 0x3f, 0xd7, 0xa8, 0x45, 0xa0, 0xd2, - 0x63, 0x52, 0xb0, 0x64, 0x09, 0x8a, 0x75, 0x2f, 0x48, 0x6f, 0x6d, 0xeb, 0x8d, 0x01, 0xa5, 0x7a, - 0x18, 0xf4, 0xee, 0xb4, 0x3a, 0x2c, 0xa7, 0xa5, 0xfb, 0xa8, 0x7e, 0xb0, 0xed, 0x44, 0x69, 0x0c, - 0x56, 0x86, 0xcf, 0x87, 0xfa, 0xc4, 0x60, 
0x4b, 0xc6, 0xad, 0x3c, 0x5e, 0x7f, 0x74, 0x50, 0x9c, - 0x7c, 0x0a, 0xf3, 0x87, 0x87, 0x5b, 0x42, 0xd3, 0xec, 0x54, 0x9a, 0x52, 0x31, 0xf2, 0x10, 0xe6, - 0x9f, 0x8b, 0x2f, 0x1f, 0x89, 0xba, 0xa2, 0x46, 0x9c, 0x55, 0x19, 0x21, 0xc9, 0x46, 0x59, 0x23, - 0x8c, 0x5d, 0x9a, 0x0a, 0x59, 0xff, 0x36, 0xa0, 0xf8, 0xdc, 0xe9, 0x21, 0xc2, 0x1e, 0x04, 0x7d, - 0x8b, 0x7b, 0x5b, 0x41, 0xd0, 0x4b, 0x30, 0xe7, 0xb3, 0x57, 0xcc, 0x57, 0x67, 0x5c, 0x4e, 0x90, - 0x9a, 0xbc, 0x08, 0x63, 0x59, 0xd6, 0x25, 0x2a, 0x27, 0x58, 0x10, 0x2e, 0xe3, 0x8e, 0xe7, 0x57, - 0xf3, 0x2b, 0x39, 0xbc, 0xe3, 0xe5, 0x0c, 0x33, 0xd7, 0x8e, 0x7d, 0xf5, 0x2e, 0xc0, 0x21, 0xb1, - 0x20, 0xef, 0x05, 0xcd, 0x50, 0xdc, 0x7f, 0xaa, 0x2d, 0xca, 0x16, 0x7d, 0x10, 0x34, 0x43, 0x2a, - 0xd6, 0xc8, 0x55, 0x28, 0xc4, 0x58, 0x7f, 0x49, 0x75, 0x5e, 0x04, 0x65, 0x11, 0xb9, 0x64, 0x95, - 0xaa, 0x05, 0xab, 0x0c, 0x25, 0xe9, 0xb7, 0x4a, 0xfe, 0x6f, 0x66, 0xe1, 0xe2, 0x13, 0x76, 0xb6, - 0x9d, 0xfa, 0x95, 0x06, 0x64, 0x05, 0x8a, 0x5d, 0xda, 0xc1, 0x8e, 0x3a, 0x42, 0x3a, 0x09, 0x8d, - 0x1d, 0x85, 0xed, 0x80, 0xa7, 0x39, 0x14, 0xc6, 0x04, 0x85, 0xaa, 0x05, 0x72, 0x1d, 0xe6, 0x9f, - 0x30, 0x7e, 0x16, 0xc6, 0x2f, 0x85, 0xd7, 0xe5, 0x8d, 0x22, 0xf2, 0x3c, 0x61, 0x1c, 0x01, 0x1c, - 0x4d, 0xd7, 0x10, 0x15, 0x46, 0x29, 0x2a, 0xcc, 0x8f, 0x42, 0x85, 0xe9, 0x2a, 0xd9, 0x84, 0x62, - 0x23, 0x0c, 0x12, 0x1e, 0x3b, 0x1e, 0x1a, 0x9e, 0x13, 0xcc, 0xef, 0x20, 0xb3, 0x4c, 0xec, 0x76, - 0x6f, 0x91, 0xea, 0x9c, 0x64, 0x1d, 0x80, 0xbd, 0xe6, 0xb1, 0xb3, 0x1f, 0x26, 0xdd, 0x17, 0x14, - 0xa0, 0x1c, 0x12, 0x0e, 0xea, 0x54, 0x5b, 0xb5, 0xde, 0x85, 0x4b, 0xfd, 0x11, 0x51, 0xa1, 0x7a, - 0x00, 0xff, 0x47, 0x99, 0xcf, 0x9c, 0x84, 0x4d, 0x1f, 0x2d, 0xcb, 0x84, 0xea, 0xb0, 0xb0, 0x52, - 0xfc, 0x9f, 0x1c, 0x14, 0x77, 0x5f, 0xb3, 0xc6, 0x11, 0x4b, 0x12, 0xa7, 0x25, 0xb0, 0x69, 0x3d, - 0x0e, 0x1b, 0x2c, 0x49, 0xba, 0xba, 0x7a, 0x04, 0xf2, 0x7d, 0xc8, 0x1f, 0x04, 0x1e, 0x57, 0xf7, - 0xe3, 0x6a, 0xe6, 0xd3, 0xc0, 0xe3, 0x4a, 0xe7, 0xfe, 0x0c, 0x15, 0x52, 0xe4, 
0x3e, 0xe4, 0xb1, - 0xbb, 0x4c, 0xd2, 0xe1, 0x5d, 0x4d, 0x16, 0x65, 0xc8, 0x96, 0xf8, 0x84, 0xe7, 0x7d, 0xc9, 0x54, - 0x96, 0xd6, 0xb2, 0xaf, 0x26, 0xef, 0x4b, 0xd6, 0xd3, 0xa0, 0x24, 0xc9, 0x2e, 0x22, 0x6b, 0x27, - 0xe6, 0xcc, 0x55, 0xd9, 0xbb, 0x91, 0x05, 0x88, 0x24, 0x67, 0x4f, 0x4b, 0x2a, 0x8b, 0x41, 0xd8, - 0x7d, 0xed, 0x71, 0x55, 0x0d, 0x59, 0x41, 0x40, 0x36, 0xcd, 0x11, 0x9c, 0xa2, 0xf4, 0x4e, 0x18, - 0x30, 0x81, 0xed, 0xb3, 0xa5, 0x91, 0x4d, 0x93, 0xc6, 0x29, 0x86, 0xe1, 0xd8, 0x6b, 0x21, 0xce, - 0x5c, 0x18, 0x1b, 0x06, 0xc9, 0xa8, 0x85, 0x41, 0x12, 0xb6, 0xe6, 0x61, 0x4e, 0xc0, 0x20, 0xeb, - 0x77, 0x06, 0x14, 0xb5, 0x3c, 0x4d, 0x50, 0x77, 0xef, 0x41, 0x1e, 0x9f, 0xef, 0x2a, 0xff, 0x0b, - 0xa2, 0xea, 0x18, 0x77, 0xa8, 0xa0, 0x62, 0xe3, 0xd8, 0x73, 0x65, 0x53, 0x5c, 0xa2, 0x38, 0x44, - 0xca, 0x33, 0xde, 0x11, 0x29, 0x5b, 0xa0, 0x38, 0x24, 0x37, 0x61, 0xe1, 0x98, 0x35, 0xda, 0xb1, - 0xc7, 0x3b, 0x22, 0x09, 0xe5, 0x8d, 0x8a, 0x68, 0x27, 0x8a, 0x26, 0x8a, 0xb3, 0xcb, 0x61, 0x3d, - 0xc6, 0xc3, 0xd9, 0xdb, 0x20, 0x81, 0xfc, 0x36, 0xbe, 0xc8, 0x70, 0x67, 0x4b, 0x54, 0x8c, 0xf1, - 0x51, 0xbc, 0x3b, 0xee, 0x51, 0xbc, 0x9b, 0x3e, 0x8a, 0xfb, 0x93, 0x8a, 0xb7, 0x8f, 0x16, 0x64, - 0xeb, 0x11, 0x2c, 0x76, 0x0f, 0x1e, 0x29, 0xc3, 0xec, 0x9e, 0xab, 0x2c, 0xcd, 0xee, 0xb9, 0xe8, - 0xca, 0xee, 0xd3, 0x3d, 0x61, 0x65, 0x81, 0xe2, 0xb0, 0x0b, 0x12, 0x72, 0x1a, 0x48, 0xd8, 0xc4, - 0xe7, 0xbe, 0x76, 0xfa, 0x90, 0x89, 0x86, 0x67, 0x49, 0xba, 0x65, 0x1c, 0x4b, 0x37, 0xfc, 0x44, - 0xe8, 0x12, 0x6e, 0xf8, 0x89, 0x75, 0x0d, 0x96, 0xfa, 0xf2, 0x85, 0x4c, 0xe2, 0x7d, 0xa9, 0xb0, - 0x24, 0x8e, 0xd7, 0x19, 0x2c, 0x0f, 0x7c, 0x72, 0x22, 0xd7, 0xa1, 0x20, 0x3f, 0x6d, 0x54, 0x66, - 0xcc, 0xcb, 0x5f, 0x7f, 0xb3, 0xf2, 0xce, 0x00, 0x83, 0x5c, 0x44, 0xb6, 0xad, 0x76, 0xe0, 0xfa, - 0xac, 0x62, 0x8c, 0x64, 0x93, 0x8b, 0x66, 0xfe, 0xd7, 0x7f, 0xb8, 0x32, 0xb3, 0xee, 0xc0, 0x85, - 0xa1, 0xcf, 0x25, 0xe4, 0x1a, 0xe4, 0x8f, 0x99, 0xdf, 0x4c, 0xcd, 0x0c, 0x31, 0xe0, 0x22, 0xb9, - 0x0a, 0x39, 0xea, 
0x9c, 0x55, 0x0c, 0xb3, 0xfa, 0xf5, 0x37, 0x2b, 0x97, 0x86, 0xbf, 0xb9, 0x38, - 0x67, 0xd2, 0xc4, 0xc6, 0x5f, 0x00, 0x16, 0x0f, 0x0f, 0xb7, 0xb6, 0x62, 0xcf, 0x6d, 0x31, 0xf2, - 0x4b, 0x03, 0xc8, 0xf0, 0xc3, 0x96, 0xdc, 0xc9, 0xae, 0xf1, 0xd1, 0x2f, 0x7f, 0xf3, 0xee, 0x94, - 0x52, 0x0a, 0x69, 0x7c, 0x0e, 0x73, 0x02, 0x1e, 0x93, 0x0f, 0x27, 0x7c, 0x25, 0x99, 0x6b, 0xe3, - 0x19, 0x95, 0xee, 0x06, 0x2c, 0xa4, 0x10, 0x93, 0xac, 0x67, 0x6e, 0xaf, 0x0f, 0x41, 0x9b, 0x1f, - 0x4d, 0xc4, 0xab, 0x8c, 0xfc, 0x14, 0xe6, 0x15, 0x72, 0x24, 0x37, 0xc6, 0xc8, 0xf5, 0x30, 0xac, - 0xb9, 0x3e, 0x09, 0x6b, 0xcf, 0x8d, 0x14, 0x21, 0x66, 0xba, 0x31, 0x80, 0x3f, 0x33, 0xdd, 0x18, - 0x82, 0x9c, 0x8d, 0xde, 0xbb, 0x32, 0xd3, 0xc8, 0x00, 0xde, 0xcc, 0x34, 0x32, 0x08, 0x3b, 0xc9, - 0x73, 0xc8, 0x23, 0xec, 0x24, 0x59, 0xed, 0x57, 0xc3, 0xa5, 0x66, 0xd6, 0x99, 0xe8, 0xc3, 0xab, - 0x3f, 0xc1, 0x6b, 0x4a, 0x7c, 0x42, 0xc8, 0xbe, 0xa0, 0xb4, 0x2f, 0x82, 0xe6, 0x8d, 0x09, 0x38, - 0x7b, 0xea, 0xd5, 0xf3, 0x7b, 0x6d, 0x82, 0xcf, 0x72, 0xe3, 0xd5, 0x0f, 0x7c, 0x00, 0x0c, 0xa1, - 0xa4, 0xa3, 0x0f, 0x62, 0x67, 0x88, 0x8e, 0x00, 0x6e, 0x66, 0x6d, 0x62, 0x7e, 0x65, 0xf0, 0x2b, - 0x7c, 0x7b, 0xf5, 0x23, 0x13, 0xb2, 0x91, 0x19, 0x8e, 0x91, 0x18, 0xc8, 0xbc, 0x3d, 0x95, 0x8c, - 0x32, 0xee, 0x48, 0xe4, 0xa3, 0xd0, 0x0d, 0xc9, 0xbe, 0xc8, 0xbb, 0x08, 0xc9, 0x9c, 0x90, 0x6f, - 0xcd, 0xb8, 0x65, 0xe0, 0x39, 0x43, 0xc4, 0x9b, 0xa9, 0x5b, 0x7b, 0x0a, 0x64, 0x9e, 0x33, 0x1d, - 0x3a, 0x6f, 0x95, 0xbe, 0x7d, 0x73, 0xc5, 0xf8, 0xfb, 0x9b, 0x2b, 0xc6, 0x3f, 0xdf, 0x5c, 0x31, - 0x4e, 0x0a, 0xe2, 0x7f, 0xce, 0xdb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x91, 0xe5, 0xca, - 0x70, 0x1e, 0x00, 0x00, + 0xf5, 0xd7, 0x8a, 0x14, 0x45, 0x3e, 0xfe, 0x10, 0x3d, 0x71, 0xf2, 0xa5, 0x17, 0x81, 0x23, 0xaf, + 0x63, 0x45, 0x96, 0x1d, 0xd2, 0x5f, 0xd9, 0x86, 0x5c, 0xbb, 0x75, 0x62, 0xfd, 0x82, 0x14, 0x4b, + 0x36, 0x3b, 0x72, 0xe1, 0x22, 0x48, 0x81, 0xae, 0xb8, 0x43, 0x6a, 0xeb, 0xd5, 0xee, 0x76, 0x77, + 0x28, 0x59, 0xc9, 0xa9, 0x87, 
0x02, 0x45, 0x8e, 0x3d, 0xf4, 0x96, 0x4b, 0x0b, 0xf4, 0xd4, 0x43, + 0xfb, 0x07, 0x34, 0xe7, 0x00, 0xed, 0xa1, 0xe7, 0x1e, 0x82, 0xc2, 0x7f, 0x44, 0x81, 0xde, 0x8a, + 0x37, 0x33, 0x4b, 0x0e, 0x7f, 0x68, 0x45, 0xd6, 0x27, 0xce, 0xbc, 0x79, 0x3f, 0xe6, 0xbd, 0x37, + 0xef, 0xcd, 0x67, 0x96, 0x50, 0xee, 0xd8, 0x9c, 0x9d, 0xda, 0x67, 0xf5, 0x30, 0x0a, 0x78, 0x40, + 0xae, 0x1c, 0x07, 0x87, 0x67, 0xf5, 0xc3, 0xae, 0xeb, 0x39, 0xaf, 0x5c, 0x5e, 0x3f, 0xf9, 0xff, + 0x7a, 0x3b, 0x0a, 0x7c, 0xce, 0x7c, 0xc7, 0xfc, 0xb8, 0xe3, 0xf2, 0xa3, 0xee, 0x61, 0xbd, 0x15, + 0x1c, 0x37, 0x3a, 0x41, 0x27, 0x68, 0x08, 0x89, 0xc3, 0x6e, 0x5b, 0xcc, 0xc4, 0x44, 0x8c, 0xa4, + 0x26, 0x73, 0x75, 0x98, 0xbd, 0x13, 0x04, 0x1d, 0x8f, 0xd9, 0xa1, 0x1b, 0xab, 0x61, 0x23, 0x0a, + 0x5b, 0x8d, 0x98, 0xdb, 0xbc, 0x1b, 0x2b, 0x99, 0xdb, 0x9a, 0x0c, 0x6e, 0xa4, 0x91, 0x6c, 0xa4, + 0x11, 0x07, 0xde, 0x09, 0x8b, 0x1a, 0xe1, 0x61, 0x23, 0x08, 0x13, 0xee, 0xc6, 0xb9, 0xdc, 0x76, + 0xe8, 0x36, 0xf8, 0x59, 0xc8, 0xe2, 0xc6, 0x69, 0x10, 0xbd, 0x62, 0x91, 0x12, 0xb8, 0x7b, 0xae, + 0x40, 0x97, 0xbb, 0x1e, 0x4a, 0xb5, 0xec, 0x30, 0x46, 0x23, 0xf8, 0xab, 0x84, 0x74, 0xb7, 0x79, + 0xe0, 0xbb, 0x31, 0x77, 0xdd, 0x8e, 0xdb, 0x68, 0xc7, 0x42, 0x46, 0x5a, 0x41, 0x27, 0x14, 0xfb, + 0xfd, 0x14, 0x17, 0xba, 0x51, 0x8b, 0x85, 0x81, 0xe7, 0xb6, 0xce, 0xd0, 0x86, 0x1c, 0x49, 0x31, + 0xeb, 0x6f, 0x59, 0xc8, 0x51, 0x16, 0x77, 0x3d, 0x4e, 0x96, 0xa0, 0x1c, 0xb1, 0xf6, 0x26, 0x0b, + 0x23, 0xd6, 0xb2, 0x39, 0x73, 0x6a, 0xc6, 0xa2, 0xb1, 0x5c, 0xd8, 0x99, 0xa1, 0x83, 0x64, 0xf2, + 0x13, 0xa8, 0x44, 0xac, 0x1d, 0x6b, 0x8c, 0xb3, 0x8b, 0xc6, 0x72, 0x71, 0xf5, 0x56, 0xfd, 0xdc, + 0x1c, 0xd6, 0x29, 0x6b, 0xef, 0xdb, 0x61, 0x5f, 0x64, 0x67, 0x86, 0x0e, 0x29, 0x21, 0xab, 0x90, + 0x89, 0x58, 0xbb, 0x96, 0x11, 0xba, 0xae, 0xa6, 0xeb, 0xda, 0x99, 0xa1, 0xc8, 0x4c, 0xd6, 0x20, + 0x8b, 0x5a, 0x6a, 0x59, 0x21, 0x74, 0xed, 0xc2, 0x0d, 0xec, 0xcc, 0x50, 0x21, 0x40, 0x9e, 0x42, + 0xfe, 0x98, 0x71, 0xdb, 0xb1, 0xb9, 0x5d, 0x83, 0xc5, 0xcc, 0x72, 
0x71, 0xb5, 0x91, 0x2a, 0x8c, + 0x01, 0xaa, 0xef, 0x2b, 0x89, 0x2d, 0x9f, 0x47, 0x67, 0xb4, 0xa7, 0x80, 0xbc, 0x84, 0x92, 0xcd, + 0x39, 0xc3, 0x64, 0xb8, 0x81, 0x1f, 0xd7, 0x4a, 0x42, 0xe1, 0xdd, 0x8b, 0x15, 0x3e, 0xd1, 0xa4, + 0xa4, 0xd2, 0x01, 0x45, 0xe6, 0x23, 0x28, 0x0f, 0xd8, 0x24, 0x55, 0xc8, 0xbc, 0x62, 0x67, 0x32, + 0x31, 0x14, 0x87, 0xe4, 0x32, 0xcc, 0x9d, 0xd8, 0x5e, 0x97, 0x89, 0x1c, 0x94, 0xa8, 0x9c, 0x3c, + 0x9c, 0x7d, 0x60, 0x98, 0x47, 0x70, 0x69, 0x44, 0xff, 0x18, 0x05, 0x3f, 0xd2, 0x15, 0x14, 0x57, + 0x3f, 0x4a, 0xd9, 0xb5, 0xae, 0x4e, 0xb3, 0xb4, 0x9e, 0x87, 0x5c, 0x24, 0x1c, 0xb2, 0x7e, 0x67, + 0x40, 0x75, 0x38, 0xd5, 0x64, 0x57, 0x25, 0xc9, 0x10, 0x61, 0xb9, 0x3f, 0xc5, 0x29, 0x41, 0x82, + 0x0a, 0x8c, 0x50, 0x61, 0xae, 0x41, 0xa1, 0x47, 0xba, 0x28, 0x18, 0x05, 0x6d, 0x8b, 0xd6, 0x1a, + 0x64, 0x28, 0x6b, 0x93, 0x0a, 0xcc, 0xba, 0xea, 0x5c, 0xd3, 0x59, 0xd7, 0x21, 0x8b, 0x90, 0x71, + 0x58, 0x5b, 0xb9, 0x5e, 0xa9, 0x87, 0x87, 0xf5, 0x4d, 0xd6, 0x76, 0x7d, 0x17, 0x5d, 0xa4, 0xb8, + 0x64, 0xfd, 0xde, 0xc0, 0xfa, 0xc0, 0x6d, 0x91, 0x4f, 0x06, 0xfc, 0xb8, 0xf8, 0xb4, 0x8f, 0xec, + 0xfe, 0x65, 0xfa, 0xee, 0xef, 0x0d, 0x66, 0xe2, 0x82, 0x12, 0xd0, 0xbd, 0xfb, 0x29, 0x94, 0xf4, + 0xdc, 0x90, 0x1d, 0x28, 0x6a, 0xe7, 0x48, 0x6d, 0x78, 0x69, 0xb2, 0xcc, 0x52, 0x5d, 0xd4, 0xfa, + 0x63, 0x06, 0x8a, 0xda, 0x22, 0x79, 0x0c, 0xd9, 0x57, 0xae, 0x2f, 0x43, 0x58, 0x59, 0x5d, 0x99, + 0x4c, 0xe5, 0x53, 0xd7, 0x77, 0xa8, 0x90, 0x23, 0x4d, 0xad, 0xee, 0x66, 0xc5, 0xb6, 0xee, 0x4d, + 0xa6, 0xe3, 0xdc, 0xe2, 0xbb, 0x33, 0x45, 0xdb, 0x90, 0x4d, 0x83, 0x40, 0x36, 0xb4, 0xf9, 0x91, + 0x68, 0x1a, 0x05, 0x2a, 0xc6, 0xe4, 0x0e, 0xbc, 0xe3, 0xfa, 0x2f, 0x02, 0x1e, 0x34, 0x23, 0xe6, + 0xb8, 0x78, 0xf8, 0x5e, 0x9c, 0x85, 0xac, 0x36, 0x27, 0x58, 0xc6, 0x2d, 0x91, 0x26, 0x54, 0x24, + 0xf9, 0xa0, 0x7b, 0xf8, 0x0b, 0xd6, 0xe2, 0x71, 0x2d, 0x27, 0xfc, 0x59, 0x4e, 0xd9, 0xc2, 0xae, + 0x2e, 0x40, 0x87, 0xe4, 0xdf, 0xaa, 0xda, 0xad, 0xbf, 0x18, 0x50, 0x1e, 0x50, 0x4f, 0x3e, 0x1d, + 0x48, 
0xd5, 0xed, 0x49, 0xb7, 0xa5, 0x25, 0xeb, 0x33, 0xc8, 0x39, 0x6e, 0x87, 0xc5, 0x5c, 0xa4, + 0xaa, 0xb0, 0xbe, 0xfa, 0xdd, 0xf7, 0x1f, 0xcc, 0xfc, 0xf3, 0xfb, 0x0f, 0x56, 0xb4, 0xab, 0x26, + 0x08, 0x99, 0xdf, 0x0a, 0x7c, 0x6e, 0xbb, 0x3e, 0x8b, 0xf0, 0x82, 0xfd, 0x58, 0x8a, 0xd4, 0x37, + 0xc5, 0x0f, 0x55, 0x1a, 0x30, 0xe8, 0xbe, 0x7d, 0xcc, 0x44, 0x9e, 0x0a, 0x54, 0x8c, 0x2d, 0x0e, + 0x65, 0xca, 0x78, 0x37, 0xf2, 0x29, 0xfb, 0x65, 0x17, 0x99, 0x7e, 0x90, 0x34, 0x12, 0xb1, 0xe9, + 0x8b, 0x1a, 0x3a, 0x32, 0x52, 0x25, 0x40, 0x96, 0x61, 0x8e, 0x45, 0x51, 0x10, 0xa9, 0xe2, 0x21, + 0x75, 0x79, 0xd5, 0xd7, 0xa3, 0xb0, 0x55, 0x3f, 0x10, 0x57, 0x3d, 0x95, 0x0c, 0x56, 0x15, 0x2a, + 0x89, 0xd5, 0x38, 0x0c, 0xfc, 0x98, 0x59, 0x0b, 0x18, 0xba, 0xb0, 0xcb, 0x63, 0xb5, 0x0f, 0xeb, + 0x5b, 0x03, 0x2a, 0x09, 0x45, 0xf2, 0x90, 0x2f, 0xa0, 0xd8, 0x6f, 0x0d, 0x49, 0x0f, 0x78, 0x98, + 0x1a, 0x54, 0x5d, 0x5e, 0xeb, 0x2b, 0xaa, 0x25, 0xe8, 0xea, 0xcc, 0x67, 0x50, 0x1d, 0x66, 0x18, + 0x93, 0xfd, 0x0f, 0x07, 0x1b, 0xc4, 0x70, 0xbf, 0xd2, 0x4e, 0xc3, 0xb7, 0xb3, 0x70, 0x85, 0x32, + 0x81, 0x5d, 0x76, 0x8f, 0xed, 0x0e, 0xdb, 0x08, 0xfc, 0xb6, 0xdb, 0x49, 0xc2, 0x5c, 0x15, 0xcd, + 0x30, 0xd1, 0x8c, 0x7d, 0x71, 0x19, 0xf2, 0x4d, 0xcf, 0xe6, 0xed, 0x20, 0x3a, 0x56, 0xca, 0x4b, + 0xa8, 0x3c, 0xa1, 0xd1, 0xde, 0x2a, 0x59, 0x84, 0xa2, 0x52, 0xbc, 0x1f, 0x38, 0x49, 0x3a, 0x75, + 0x12, 0xa9, 0xc1, 0xfc, 0x5e, 0xd0, 0x79, 0x86, 0xc9, 0x96, 0x15, 0x96, 0x4c, 0x89, 0x05, 0x25, + 0xc5, 0x18, 0xf5, 0xaa, 0x6b, 0x8e, 0x0e, 0xd0, 0xc8, 0xfb, 0x50, 0x38, 0x60, 0x71, 0xec, 0x06, + 0xfe, 0xee, 0x66, 0x2d, 0x27, 0xe4, 0xfb, 0x04, 0xd4, 0x7d, 0xc0, 0x83, 0x88, 0xed, 0x6e, 0xd6, + 0xe6, 0xa5, 0x6e, 0x35, 0x25, 0xfb, 0x50, 0x39, 0x10, 0x38, 0xa7, 0x89, 0xe8, 0xc6, 0x65, 0x71, + 0x2d, 0x2f, 0x52, 0x74, 0x63, 0x34, 0x45, 0x3a, 0x1e, 0xaa, 0x0b, 0xf6, 0x33, 0x3a, 0x24, 0x6c, + 0xfd, 0xd6, 0x00, 0x73, 0x5c, 0x00, 0xd5, 0x69, 0xf8, 0x0c, 0x72, 0xf2, 0x7c, 0xcb, 0x20, 0xfe, + 0x6f, 0x95, 0x21, 0x7f, 0xc9, 0x7b, 0x90, 
0x93, 0xda, 0x55, 0x51, 0xab, 0x59, 0x92, 0xa5, 0x4c, + 0x2f, 0x4b, 0xd6, 0xaf, 0x73, 0x50, 0x3a, 0xc0, 0x2d, 0x25, 0x89, 0xac, 0x03, 0xf4, 0xf3, 0xaf, + 0x6a, 0x66, 0xf8, 0x54, 0x68, 0x1c, 0xc4, 0x84, 0xfc, 0xb6, 0x3a, 0x9f, 0xea, 0x8a, 0xec, 0xcd, + 0xc9, 0xe7, 0x50, 0x4c, 0xc6, 0xcf, 0x43, 0x5e, 0xcb, 0x88, 0xe8, 0x3d, 0x48, 0x39, 0xe0, 0xfa, + 0x4e, 0xea, 0x9a, 0xa8, 0x3a, 0xde, 0x1a, 0x85, 0xdc, 0x86, 0x4b, 0xb6, 0xe7, 0x05, 0xa7, 0xaa, + 0x66, 0x45, 0xf5, 0x89, 0xec, 0xe7, 0xe9, 0xe8, 0x02, 0xf6, 0x62, 0x8d, 0xf8, 0x24, 0x8a, 0xec, + 0x33, 0x0c, 0x44, 0x4e, 0xf0, 0x8f, 0x5b, 0xc2, 0xb6, 0xb8, 0xed, 0xfa, 0xb6, 0x57, 0x03, 0xc1, + 0x23, 0x27, 0x78, 0xdc, 0xb6, 0x5e, 0x87, 0x41, 0xc4, 0x59, 0xf4, 0x84, 0xf3, 0xa8, 0x56, 0x14, + 0xe1, 0x1d, 0xa0, 0x91, 0x26, 0x94, 0x36, 0xec, 0xd6, 0x11, 0xdb, 0x3d, 0x46, 0x62, 0x02, 0xdd, + 0xd2, 0x9a, 0xa5, 0x60, 0x7f, 0x1e, 0xea, 0x98, 0x4d, 0xd7, 0x40, 0x5a, 0x50, 0x49, 0x5c, 0x97, + 0x2d, 0xa0, 0x56, 0x16, 0x3a, 0x1f, 0x4d, 0x1b, 0x4a, 0x29, 0x2d, 0x4d, 0x0c, 0xa9, 0xc4, 0x44, + 0x6e, 0x61, 0xb5, 0xdb, 0x9c, 0xd5, 0x2a, 0xc2, 0xe7, 0xde, 0x7c, 0x4c, 0x25, 0x2c, 0xbc, 0x45, + 0x25, 0x98, 0x8f, 0xa1, 0x3a, 0x9c, 0xdc, 0x69, 0x90, 0x97, 0xf9, 0x63, 0x78, 0x67, 0x8c, 0x47, + 0x6f, 0xd5, 0xdd, 0xfe, 0x6c, 0xc0, 0xa5, 0x91, 0x34, 0xe0, 0x0d, 0x23, 0xba, 0x8a, 0x54, 0x29, + 0xc6, 0x64, 0x1f, 0xe6, 0x30, 0xcd, 0xb1, 0xc2, 0x1a, 0x6b, 0xd3, 0xe4, 0xb5, 0x2e, 0x24, 0x65, + 0xfc, 0xa5, 0x16, 0xf3, 0x01, 0x40, 0x9f, 0x38, 0x15, 0xfe, 0xfc, 0x02, 0xca, 0x2a, 0xc9, 0xaa, + 0x83, 0x54, 0x25, 0x6c, 0x51, 0xc2, 0x08, 0x4b, 0xfa, 0x97, 0x5f, 0x66, 0xca, 0xcb, 0xcf, 0xfa, + 0x0a, 0x16, 0x28, 0xb3, 0x9d, 0x6d, 0xd7, 0x63, 0xe7, 0xf7, 0x78, 0x2c, 0x7e, 0xd7, 0x63, 0x4d, + 0x84, 0x3e, 0x49, 0xf1, 0xab, 0x39, 0x79, 0x08, 0x73, 0xd4, 0xf6, 0x3b, 0x4c, 0x99, 0xfe, 0x30, + 0xc5, 0xb4, 0x30, 0x82, 0xbc, 0x54, 0x8a, 0x58, 0x8f, 0xa0, 0xd0, 0xa3, 0x61, 0x33, 0x7b, 0xde, + 0x6e, 0xc7, 0x4c, 0x36, 0xc6, 0x0c, 0x55, 0x33, 0xa4, 0xef, 0x31, 0xbf, 0xa3, 
0x4c, 0x67, 0xa8, + 0x9a, 0x59, 0x4b, 0xf8, 0x5e, 0x48, 0x76, 0xae, 0x42, 0x43, 0x20, 0xbb, 0x89, 0xf8, 0xd0, 0x10, + 0xf5, 0x2a, 0xc6, 0x96, 0x83, 0x97, 0xb6, 0xed, 0x6c, 0xba, 0xd1, 0xf9, 0x0e, 0xd6, 0x60, 0x7e, + 0xd3, 0x8d, 0x34, 0xff, 0x92, 0x29, 0x59, 0xc2, 0xeb, 0xbc, 0xe5, 0x75, 0x1d, 0xf4, 0x96, 0xb3, + 0xc8, 0x57, 0x5d, 0x75, 0x88, 0x6a, 0x7d, 0x22, 0xe3, 0x28, 0xac, 0xa8, 0xcd, 0xdc, 0x86, 0x79, + 0xe6, 0xf3, 0x08, 0xcb, 0x48, 0xde, 0xf9, 0xa4, 0x2e, 0x5f, 0xe0, 0x75, 0xf1, 0x02, 0x17, 0xd8, + 0x82, 0x26, 0x2c, 0xd6, 0x1a, 0x2c, 0x20, 0x21, 0x3d, 0x11, 0x04, 0xb2, 0xda, 0x26, 0xc5, 0xd8, + 0x7a, 0x08, 0xd5, 0xbe, 0xa0, 0x32, 0xbd, 0x04, 0x59, 0x04, 0xbf, 0xaa, 0xaf, 0x8f, 0xb3, 0x2b, + 0xd6, 0xad, 0xeb, 0xb0, 0x90, 0x14, 0xff, 0xb9, 0x46, 0x2d, 0x02, 0xd5, 0x3e, 0x93, 0xc2, 0x3d, + 0x65, 0x28, 0x36, 0x5d, 0x3f, 0x81, 0x05, 0xd6, 0x1b, 0x03, 0x4a, 0xcd, 0xc0, 0xef, 0xdf, 0x72, + 0x4d, 0x58, 0x48, 0x4a, 0xf7, 0x49, 0x73, 0x77, 0xc3, 0x0e, 0x93, 0x18, 0x2c, 0x8e, 0x9e, 0x0f, + 0xf5, 0x0d, 0xa3, 0x2e, 0x19, 0xd7, 0xb3, 0x78, 0x21, 0xd2, 0x61, 0x71, 0xf2, 0x29, 0xcc, 0xef, + 0xed, 0xad, 0x0b, 0x4d, 0xb3, 0x53, 0x69, 0x4a, 0xc4, 0xc8, 0x63, 0x98, 0x7f, 0x29, 0x3e, 0xad, + 0xc4, 0xea, 0x8a, 0x1a, 0x73, 0x56, 0x65, 0x84, 0x24, 0x1b, 0x65, 0xad, 0x20, 0x72, 0x68, 0x22, + 0x64, 0xfd, 0xdb, 0x80, 0xe2, 0x4b, 0xbb, 0x0f, 0x39, 0xfb, 0x18, 0xf7, 0x2d, 0x6e, 0x72, 0x85, + 0x71, 0x2f, 0xc3, 0x9c, 0xc7, 0x4e, 0x98, 0xa7, 0xce, 0xb8, 0x9c, 0x20, 0x35, 0x3e, 0x0a, 0x22, + 0x59, 0xd6, 0x25, 0x2a, 0x27, 0x58, 0x10, 0x0e, 0xe3, 0xb6, 0xeb, 0xd5, 0xb2, 0x8b, 0x19, 0xbc, + 0xf5, 0xe5, 0x0c, 0x33, 0xd7, 0x8d, 0x3c, 0xf5, 0xf0, 0xc0, 0x21, 0xb1, 0x20, 0xeb, 0xfa, 0xed, + 0x40, 0xdc, 0x7f, 0xaa, 0x2d, 0xca, 0x16, 0xbd, 0xeb, 0xb7, 0x03, 0x2a, 0xd6, 0xc8, 0x35, 0xc8, + 0x45, 0x58, 0x7f, 0x71, 0x6d, 0x5e, 0x04, 0xa5, 0x80, 0x5c, 0xb2, 0x4a, 0xd5, 0x82, 0x55, 0x81, + 0x92, 0xf4, 0x5b, 0x25, 0xff, 0x4f, 0xb3, 0xf0, 0xce, 0x33, 0x76, 0xba, 0x91, 0xf8, 0x95, 0x04, + 0x64, 0x11, 0x8a, 
0x3d, 0xda, 0xee, 0xa6, 0x3a, 0x42, 0x3a, 0x09, 0x8d, 0xed, 0x07, 0x5d, 0x9f, + 0x27, 0x39, 0x14, 0xc6, 0x04, 0x85, 0xaa, 0x05, 0x72, 0x03, 0xe6, 0x9f, 0x31, 0x7e, 0x1a, 0x44, + 0xaf, 0x84, 0xd7, 0x95, 0xd5, 0x22, 0xf2, 0x3c, 0x63, 0x1c, 0x11, 0x22, 0x4d, 0xd6, 0x10, 0x76, + 0x86, 0x09, 0xec, 0xcc, 0x8e, 0x83, 0x9d, 0xc9, 0x2a, 0x59, 0x83, 0x62, 0x2b, 0xf0, 0x63, 0x1e, + 0xd9, 0x2e, 0x1a, 0x9e, 0x13, 0xcc, 0xef, 0x22, 0xb3, 0x4c, 0xec, 0x46, 0x7f, 0x91, 0xea, 0x9c, + 0x64, 0x05, 0x80, 0xbd, 0xe6, 0x91, 0xbd, 0x13, 0xc4, 0xbd, 0x27, 0x1a, 0xa0, 0x1c, 0x12, 0x76, + 0x9b, 0x54, 0x5b, 0xc5, 0x0e, 0x79, 0x14, 0xc4, 0x5c, 0xbc, 0x53, 0x24, 0xbc, 0xec, 0xcd, 0xad, + 0xf7, 0xe0, 0xf2, 0x60, 0xb4, 0x54, 0x18, 0x1f, 0xc1, 0xff, 0x51, 0xe6, 0x31, 0x3b, 0x66, 0xd3, + 0x47, 0xd2, 0x32, 0xa1, 0x36, 0x2a, 0xac, 0x14, 0xff, 0x27, 0x03, 0xc5, 0xad, 0xd7, 0xac, 0xb5, + 0xcf, 0xe2, 0xd8, 0xee, 0x08, 0x60, 0xdc, 0x8c, 0x82, 0x16, 0x8b, 0xe3, 0x9e, 0xae, 0x3e, 0x81, + 0xfc, 0x10, 0xb2, 0xbb, 0xbe, 0xcb, 0xd5, 0xdd, 0xb9, 0x94, 0xfa, 0x2e, 0x71, 0xb9, 0xd2, 0xb9, + 0x33, 0x43, 0x85, 0x14, 0x79, 0x08, 0x59, 0xec, 0x3c, 0x93, 0x74, 0x7f, 0x47, 0x93, 0x45, 0x19, + 0xb2, 0x2e, 0xbe, 0x1f, 0xba, 0x5f, 0x32, 0x95, 0xc1, 0xe5, 0xf4, 0x6b, 0xcb, 0xfd, 0x92, 0xf5, + 0x35, 0x28, 0x49, 0xb2, 0x85, 0xb0, 0xde, 0x8e, 0x38, 0x73, 0x54, 0x66, 0x6f, 0xa6, 0x81, 0x25, + 0xc9, 0xd9, 0xd7, 0x92, 0xc8, 0x62, 0x10, 0xb6, 0x5e, 0xbb, 0x5c, 0x55, 0x4a, 0x5a, 0x10, 0x90, + 0x4d, 0x73, 0x04, 0xa7, 0x28, 0xbd, 0x19, 0xf8, 0x32, 0xf3, 0xe9, 0xd2, 0xc8, 0xa6, 0x49, 0xe3, + 0x14, 0xc3, 0x70, 0xe0, 0x76, 0x10, 0x83, 0xe6, 0x2f, 0x0c, 0x83, 0x64, 0xd4, 0xc2, 0x20, 0x09, + 0xeb, 0xf3, 0x30, 0x27, 0x20, 0x92, 0xf5, 0x77, 0x03, 0x8a, 0x5a, 0x9e, 0x26, 0xa8, 0xc9, 0xf7, + 0x21, 0xbb, 0xcf, 0xc4, 0x37, 0x15, 0x34, 0x9e, 0x17, 0x15, 0xc9, 0xb8, 0x4d, 0x05, 0x15, 0x9b, + 0xca, 0xb6, 0x23, 0x1b, 0x66, 0x99, 0xe2, 0x10, 0x29, 0x2f, 0xf8, 0x99, 0x48, 0x59, 0x9e, 0xe2, + 0x90, 0xdc, 0x86, 0xfc, 0x01, 0x6b, 0x75, 0x23, 0x97, 
0x9f, 0x89, 0x24, 0x54, 0x56, 0xab, 0xa2, + 0xd5, 0x28, 0x9a, 0x28, 0xdc, 0x1e, 0x07, 0xb9, 0x05, 0x85, 0x98, 0xb5, 0x22, 0xc6, 0x99, 0x7f, + 0xa2, 0xaa, 0xaa, 0xac, 0xd8, 0x23, 0xc6, 0xb7, 0xfc, 0x13, 0xda, 0x5f, 0xb7, 0x9e, 0xe2, 0x49, + 0xee, 0x7b, 0x43, 0x20, 0xbb, 0x81, 0x6f, 0x47, 0x74, 0xa3, 0x4c, 0xc5, 0x18, 0x9f, 0xef, 0x5b, + 0x17, 0x3d, 0xdf, 0xb7, 0x92, 0xe7, 0xfb, 0xe0, 0x09, 0xc0, 0x6b, 0x4c, 0xcb, 0x88, 0xf5, 0x04, + 0x0a, 0xbd, 0x53, 0x4a, 0x2a, 0x30, 0xbb, 0xed, 0x28, 0x4b, 0xb3, 0xdb, 0x0e, 0xfa, 0xbd, 0xf5, + 0x7c, 0x5b, 0x58, 0xc9, 0x53, 0x1c, 0xf6, 0xd0, 0x46, 0x46, 0x43, 0x1b, 0x6b, 0x50, 0x1e, 0x38, + 0xaa, 0xc8, 0x44, 0x83, 0xd3, 0x38, 0xd9, 0x32, 0x8e, 0xa5, 0x1b, 0x5e, 0x2c, 0x74, 0x09, 0x37, + 0xbc, 0xd8, 0xba, 0x0e, 0xe5, 0x81, 0xe4, 0x22, 0x93, 0x78, 0x09, 0x2b, 0x50, 0x8a, 0xe3, 0x15, + 0x06, 0x0b, 0x43, 0x1f, 0xc7, 0xc8, 0x0d, 0xc8, 0xc9, 0x8f, 0x30, 0xd5, 0x19, 0xf3, 0xca, 0xd7, + 0xdf, 0x2c, 0xbe, 0x3b, 0xc4, 0x20, 0x17, 0x91, 0x6d, 0xbd, 0xeb, 0x3b, 0x1e, 0xab, 0x1a, 0x63, + 0xd9, 0xe4, 0xa2, 0x99, 0xfd, 0xcd, 0x1f, 0xae, 0xce, 0xac, 0xd8, 0x70, 0x69, 0xe4, 0xc3, 0x0e, + 0xb9, 0x0e, 0xd9, 0x03, 0xe6, 0xb5, 0x13, 0x33, 0x23, 0x0c, 0xb8, 0x48, 0xae, 0x41, 0x86, 0xda, + 0xa7, 0x55, 0xc3, 0xac, 0x7d, 0xfd, 0xcd, 0xe2, 0xe5, 0xd1, 0xaf, 0x43, 0xf6, 0xa9, 0x34, 0xb1, + 0xfa, 0x57, 0x80, 0xc2, 0xde, 0xde, 0xfa, 0x7a, 0xe4, 0x3a, 0x1d, 0x46, 0x7e, 0x65, 0x00, 0x19, + 0x7d, 0x33, 0x93, 0x7b, 0xe9, 0x0d, 0x61, 0xfc, 0x37, 0x0a, 0xf3, 0xfe, 0x94, 0x52, 0x0a, 0xb2, + 0x7c, 0x0e, 0x73, 0x02, 0x67, 0x93, 0x8f, 0x26, 0x7c, 0x6e, 0x99, 0xcb, 0x17, 0x33, 0x2a, 0xdd, + 0x2d, 0xc8, 0x27, 0x58, 0x95, 0xac, 0xa4, 0x6e, 0x6f, 0x00, 0x8a, 0x9b, 0xb7, 0x26, 0xe2, 0x55, + 0x46, 0x7e, 0x0e, 0xf3, 0x0a, 0x82, 0x92, 0x9b, 0x17, 0xc8, 0xf5, 0xc1, 0xb0, 0xb9, 0x32, 0x09, + 0x6b, 0xdf, 0x8d, 0x04, 0x6a, 0xa6, 0xba, 0x31, 0x04, 0x64, 0x53, 0xdd, 0x18, 0xc1, 0xae, 0xad, + 0xfe, 0x03, 0x35, 0xd5, 0xc8, 0x10, 0x70, 0x4d, 0x35, 0x32, 0x8c, 0x5f, 0xc9, 0x4b, 0xc8, 
0x22, + 0x7e, 0x25, 0x69, 0xbd, 0x5a, 0x03, 0xb8, 0x66, 0xda, 0x99, 0x18, 0x00, 0xbe, 0x3f, 0xc3, 0x3b, + 0x4d, 0x7c, 0x8b, 0x48, 0xbf, 0xcd, 0xb4, 0x6f, 0x97, 0xe6, 0xcd, 0x09, 0x38, 0xfb, 0xea, 0xd5, + 0x3b, 0x7e, 0x79, 0x82, 0x0f, 0x88, 0x17, 0xab, 0x1f, 0xfa, 0x54, 0x19, 0x40, 0x49, 0x87, 0x2a, + 0xa4, 0x9e, 0x22, 0x3a, 0x06, 0x01, 0x9a, 0x8d, 0x89, 0xf9, 0x95, 0xc1, 0xaf, 0xf0, 0x11, 0x37, + 0x08, 0x63, 0xc8, 0x6a, 0x6a, 0x38, 0xc6, 0x02, 0x26, 0xf3, 0xee, 0x54, 0x32, 0xca, 0xb8, 0x2d, + 0x61, 0x92, 0x82, 0x42, 0x24, 0xfd, 0xd6, 0xef, 0xc1, 0x29, 0x73, 0x42, 0xbe, 0x65, 0xe3, 0x8e, + 0x81, 0xe7, 0x0c, 0xa1, 0x73, 0xaa, 0x6e, 0xed, 0x4d, 0x91, 0x7a, 0xce, 0x74, 0x0c, 0xbe, 0x5e, + 0xfa, 0xee, 0xcd, 0x55, 0xe3, 0x1f, 0x6f, 0xae, 0x1a, 0xff, 0x7a, 0x73, 0xd5, 0x38, 0xcc, 0x89, + 0x7f, 0x64, 0xef, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0x20, 0x47, 0x7d, 0x27, 0x1a, 0x1f, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -4014,6 +4049,20 @@ func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, erro i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.SourcePolicies) > 0 { + for iNdEx := len(m.SourcePolicies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SourcePolicies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } if len(m.StoreID) > 0 { i -= len(m.StoreID) copy(dAtA[i:], m.StoreID) @@ -4093,6 +4142,13 @@ func (m *ResolveImageConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, err i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0x1a + } if len(m.Config) > 0 { i -= len(m.Config) copy(dAtA[i:], m.Config) @@ -4964,6 +5020,13 @@ func (m *NewContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x3a + } if len(m.ExtraHosts) > 0 { for iNdEx := len(m.ExtraHosts) - 1; iNdEx >= 0; iNdEx-- { { @@ -5333,6 +5396,20 @@ func (m *InitMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Secretenv) > 0 { + for iNdEx := len(m.Secretenv) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secretenv[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.Security != 0 { i = encodeVarintGateway(dAtA, i, uint64(m.Security)) i-- @@ -5958,6 +6035,12 @@ func (m *ResolveImageConfigRequest) Size() (n int) { if l > 0 { n += 1 + l + sovGateway(uint64(l)) } + if len(m.SourcePolicies) > 0 { + for _, e := range m.SourcePolicies { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5978,6 +6061,10 @@ func (m *ResolveImageConfigResponse) Size() (n int) { if l > 0 { n += 1 + l + sovGateway(uint64(l)) } + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6391,6 +6478,10 @@ func (m *NewContainerRequest) Size() (n int) { n += 1 + l + sovGateway(uint64(l)) } } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -6567,6 +6658,12 @@ func (m *InitMessage) Size() (n int) { if m.Security != 0 { n += 1 + sovGateway(uint64(m.Security)) } + if len(m.Secretenv) > 0 { + for _, e := range m.Secretenv { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -8800,6 +8897,40 
@@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { } m.StoreID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePolicies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePolicies = append(m.SourcePolicies, &pb1.Policy{}) + if err := m.SourcePolicies[len(m.SourcePolicies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -8917,6 +9048,38 @@ func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { m.Config = []byte{} } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -11463,6 +11626,38 @@ func (m *NewContainerRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -12210,6 +12405,40 @@ func (m *InitMessage) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secretenv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secretenv = append(m.Secretenv, &pb.SecretEnv{}) + if err := m.Secretenv[len(m.Secretenv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto index 2e55f1db86..c00d97391a 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto @@ -11,6 +11,7 @@ import 
"github.com/tonistiigi/fsutil/types/stat.proto"; import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto"; + option (gogoproto.sizer_all) = true; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; @@ -38,7 +39,7 @@ service LLBBridge { rpc ExecProcess(stream ExecMessage) returns (stream ExecMessage); // apicaps:CapGatewayWarnings - rpc Warn(WarnRequest) returns (WarnResponse); + rpc Warn(WarnRequest) returns (WarnResponse); } message Result { @@ -124,11 +125,13 @@ message ResolveImageConfigRequest { int32 ResolverType = 5; string SessionID = 6; string StoreID = 7; + repeated moby.buildkit.v1.sourcepolicy.Policy SourcePolicies = 8; } message ResolveImageConfigResponse { string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; bytes Config = 2; + string Ref = 3; } message SolveRequest { @@ -138,7 +141,7 @@ message SolveRequest { // 4 was removed in BuildKit v0.11.0. bool allowResultReturn = 5; bool allowResultArrayRef = 6; - + // apicaps.CapSolveInlineReturn deprecated bool Final = 10; bytes ExporterAttr = 11; @@ -165,7 +168,7 @@ message SolveResponse { string ref = 1; // can be used by readfile request // deprecated /* bytes ExporterAttr = 2;*/ - + // these fields are returned when allowMapReturn was set Result result = 3; } @@ -239,6 +242,7 @@ message NewContainerRequest { pb.Platform platform = 4; pb.WorkerConstraints constraints = 5; repeated pb.HostIP extraHosts = 6; + string hostname = 7; } message NewContainerResponse{} @@ -255,7 +259,7 @@ message ExecMessage { // InitMessage sent from client to server will start a new process in a // container InitMessage Init = 2; - // FdMessage used from client to server for input (stdin) and + // FdMessage used from client to server for input (stdin) and // from server to client for output (stdout, stderr) FdMessage File = 3; // ResizeMessage used from client to server for terminal resize events @@ -280,6 +284,7 @@ message 
InitMessage{ repeated uint32 Fds = 3; bool Tty = 4; pb.SecurityMode Security = 5; + repeated pb.SecretEnv secretenv = 6; } message ExitMessage { diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go index e313542629..d299d7ad9e 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.go +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.go @@ -4,8 +4,11 @@ import ( "context" "fmt" io "io" + "net/url" "os" + "strconv" "strings" + "unicode" "github.com/moby/buildkit/session" "github.com/pkg/errors" @@ -82,6 +85,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr } opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object + opts = decodeOpts(opts) dirName := "" name, ok := opts[keyDirName] @@ -209,6 +213,8 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { var stream grpc.ClientStream + opts = encodeOpts(opts) + ctx = metadata.NewOutgoingContext(ctx, opts) switch pr.name { @@ -280,7 +286,7 @@ func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) (err error) { } defer func() { err1 := wc.Close() - if err != nil { + if err == nil { err = err1 } }() @@ -337,3 +343,60 @@ func (e InvalidSessionError) Error() string { func (e InvalidSessionError) Unwrap() error { return e.err } + +func encodeOpts(opts map[string][]string) map[string][]string { + md := make(map[string][]string, len(opts)) + for k, v := range opts { + out, encoded := encodeStringForHeader(v) + md[k] = out + if encoded { + md[k+"-encoded"] = []string{"1"} + } + } + return md +} + +func decodeOpts(opts map[string][]string) map[string][]string { + md := make(map[string][]string, len(opts)) + for k, v := range opts { + out := make([]string, len(v)) + var isDecoded bool + if v, ok := opts[k+"-encoded"]; ok && len(v) > 0 { + if b, _ := strconv.ParseBool(v[0]); b { + isDecoded = true + } 
+ } + if isDecoded { + for i, s := range v { + out[i], _ = url.QueryUnescape(s) + } + } else { + copy(out, v) + } + md[k] = out + } + return md +} + +// encodeStringForHeader encodes a string value so it can be used in grpc header. This encoding +// is backwards compatible and avoids encoding ASCII characters. +func encodeStringForHeader(inputs []string) ([]string, bool) { + var encode bool + for _, input := range inputs { + for _, runeVal := range input { + // Only encode non-ASCII characters, and characters that have special + // meaning during decoding. + if runeVal > unicode.MaxASCII { + encode = true + break + } + } + } + if !encode { + return inputs, false + } + for i, input := range inputs { + inputs[i] = url.QueryEscape(input) + } + return inputs, true +} diff --git a/vendor/github.com/moby/buildkit/session/grpc.go b/vendor/github.com/moby/buildkit/session/grpc.go index 6fac82e0b0..bf8180722a 100644 --- a/vendor/github.com/moby/buildkit/session/grpc.go +++ b/vendor/github.com/moby/buildkit/session/grpc.go @@ -134,7 +134,7 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) } } - bklog.G(ctx).WithFields(logFields).Debug("healthcheck completed") + bklog.G(ctx).WithFields(logFields).Trace("healthcheck completed") } } } diff --git a/vendor/github.com/moby/buildkit/session/sshforward/copy.go b/vendor/github.com/moby/buildkit/session/sshforward/copy.go index a4a065b46e..eac5f7614a 100644 --- a/vendor/github.com/moby/buildkit/session/sshforward/copy.go +++ b/vendor/github.com/moby/buildkit/session/sshforward/copy.go @@ -24,10 +24,12 @@ func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStre if err == io.EOF { // indicates client performed CloseSend, but they may still be // reading data - if conn, ok := conn.(interface { + if closeWriter, ok := conn.(interface { CloseWrite() error }); ok { - conn.CloseWrite() + closeWriter.CloseWrite() + } else { + conn.Close() } return nil } diff --git 
a/vendor/github.com/moby/buildkit/snapshot/containerd/content.go b/vendor/github.com/moby/buildkit/snapshot/containerd/content.go index 3c730523a7..b4bb2f300b 100644 --- a/vendor/github.com/moby/buildkit/snapshot/containerd/content.go +++ b/vendor/github.com/moby/buildkit/snapshot/containerd/content.go @@ -5,64 +5,80 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/namespaces" + "github.com/containerd/nydus-snapshotter/pkg/errdefs" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -func NewContentStore(store content.Store, ns string) content.Store { - return &nsContent{ns, store} +func NewContentStore(store content.Store, ns string) *Store { + return &Store{ns, store} } -type nsContent struct { +type Store struct { ns string content.Store } -func (c *nsContent) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { +func (c *Store) Namespace() string { + return c.ns +} + +func (c *Store) WithNamespace(ns string) *Store { + return NewContentStore(c.Store, ns) +} + +func (c *Store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.Info(ctx, dgst) } -func (c *nsContent) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { +func (c *Store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.Update(ctx, info, fieldpaths...) } -func (c *nsContent) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { +func (c *Store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.Walk(ctx, fn, filters...) 
} -func (c *nsContent) Delete(ctx context.Context, dgst digest.Digest) error { +func (c *Store) Delete(ctx context.Context, dgst digest.Digest) error { return errors.Errorf("contentstore.Delete usage is forbidden") } -func (c *nsContent) Status(ctx context.Context, ref string) (content.Status, error) { +func (c *Store) Status(ctx context.Context, ref string) (content.Status, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.Status(ctx, ref) } -func (c *nsContent) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { +func (c *Store) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.ListStatuses(ctx, filters...) } -func (c *nsContent) Abort(ctx context.Context, ref string) error { +func (c *Store) Abort(ctx context.Context, ref string) error { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.Abort(ctx, ref) } -func (c *nsContent) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { +func (c *Store) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.ReaderAt(ctx, desc) } -func (c *nsContent) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { +func (c *Store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { return c.writer(ctx, 3, opts...) } -func (c *nsContent) writer(ctx context.Context, retries int, opts ...content.WriterOpt) (content.Writer, error) { +func (c *Store) WithFallbackNS(ns string) content.Store { + return &nsFallbackStore{ + main: c, + fb: c.WithNamespace(ns), + } +} + +func (c *Store) writer(ctx context.Context, retries int, opts ...content.WriterOpt) (content.Writer, error) { ctx = namespaces.WithNamespace(ctx, c.ns) w, err := c.Store.Writer(ctx, opts...) 
if err != nil { @@ -80,3 +96,58 @@ func (w *nsWriter) Commit(ctx context.Context, size int64, expected digest.Diges ctx = namespaces.WithNamespace(ctx, w.ns) return w.Writer.Commit(ctx, size, expected, opts...) } + +type nsFallbackStore struct { + main *Store + fb *Store +} + +var _ content.Store = &nsFallbackStore{} + +func (c *nsFallbackStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + info, err := c.main.Info(ctx, dgst) + if err != nil { + if errdefs.IsNotFound(err) { + return c.fb.Info(ctx, dgst) + } + } + return info, err +} + +func (c *nsFallbackStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + return c.main.Update(ctx, info, fieldpaths...) +} + +func (c *nsFallbackStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { + return c.main.Walk(ctx, fn, filters...) +} + +func (c *nsFallbackStore) Delete(ctx context.Context, dgst digest.Digest) error { + return c.main.Delete(ctx, dgst) +} + +func (c *nsFallbackStore) Status(ctx context.Context, ref string) (content.Status, error) { + return c.main.Status(ctx, ref) +} + +func (c *nsFallbackStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { + return c.main.ListStatuses(ctx, filters...) +} + +func (c *nsFallbackStore) Abort(ctx context.Context, ref string) error { + return c.main.Abort(ctx, ref) +} + +func (c *nsFallbackStore) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { + ra, err := c.main.ReaderAt(ctx, desc) + if err != nil { + if errdefs.IsNotFound(err) { + return c.fb.ReaderAt(ctx, desc) + } + } + return ra, err +} + +func (c *nsFallbackStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + return c.main.Writer(ctx, opts...) 
+} diff --git a/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go b/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go index 136d7a6282..c4875000ea 100644 --- a/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go +++ b/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go @@ -167,8 +167,7 @@ func applierFor(dest Mountable, tryCrossSnapshotLink, userxattr bool) (_ *applie } mnt := mnts[0] - switch mnt.Type { - case "overlay": + if overlay.IsOverlayMountType(mnt) { for _, opt := range mnt.Options { if strings.HasPrefix(opt, "upperdir=") { a.root = strings.TrimPrefix(opt, "upperdir=") @@ -183,9 +182,9 @@ func applierFor(dest Mountable, tryCrossSnapshotLink, userxattr bool) (_ *applie return nil, errors.Errorf("could not find lowerdir in mount options %v", mnt.Options) } a.createWhiteoutDelete = true - case "bind", "rbind": + } else if mnt.Type == "bind" || mnt.Type == "rbind" { a.root = mnt.Source - default: + } else { mnter := LocalMounter(dest) root, err := mnter.Mount() if err != nil { @@ -570,10 +569,9 @@ func differFor(lowerMntable, upperMntable Mountable) (_ *differ, rerr error) { } if len(upperMnts) == 1 { - switch upperMnts[0].Type { - case "bind", "rbind": + if upperMnts[0].Type == "bind" || upperMnts[0].Type == "rbind" { d.upperBindSource = upperMnts[0].Source - case "overlay": + } else if overlay.IsOverlayMountType(upperMnts[0]) { overlayDirs, err := overlay.GetOverlayLayers(upperMnts[0]) if err != nil { return nil, errors.Wrapf(err, "failed to get overlay layers from mount %+v", upperMnts[0]) diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go index df2e99b6c1..0e0a37fe67 100644 --- a/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go +++ b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go @@ -1,16 +1,20 @@ package snapshot import ( + "os" + + "github.com/Microsoft/go-winio/pkg/bindfilter" 
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" "github.com/pkg/errors" + "golang.org/x/sys/windows" ) func (lm *localMounter) Mount() (string, error) { lm.mu.Lock() defer lm.mu.Unlock() - if lm.mounts == nil { + if lm.mounts == nil && lm.mountable != nil { mounts, release, err := lm.mountable.Mount() if err != nil { return "", err @@ -26,27 +30,30 @@ func (lm *localMounter) Mount() (string, error) { } m := lm.mounts[0] + dir, err := os.MkdirTemp("", "buildkit-mount") + if err != nil { + return "", errors.Wrap(err, "failed to create temp dir") + } if m.Type == "bind" || m.Type == "rbind" { - ro := false - for _, opt := range m.Options { - if opt == "ro" { - ro = true - break - } - } - if !ro { + if !m.ReadOnly() { + // This is a rw bind mount, we can simply return the source. + // NOTE(gabriel-samfira): This is safe to do if the source of the bind mount is a DOS path + // of a local folder. If it's a \\?\Volume{} (for any reason that I can't think of now) + // we should allow bindfilter.ApplyFileBinding() to mount it. return m.Source, nil } + // The Windows snapshotter does not have any notion of bind mounts. We emulate + // bind mounts here using the bind filter. + if err := bindfilter.ApplyFileBinding(dir, m.Source, m.ReadOnly()); err != nil { + return "", errors.Wrapf(err, "failed to mount %v: %+v", m, err) + } + } else { + if err := m.Mount(dir); err != nil { + return "", errors.Wrapf(err, "failed to mount %v: %+v", m, err) + } } - // Windows mounts always activate in-place, so the target of the mount must be the source directory. - // See https://github.com/containerd/containerd/pull/2366 - dir := m.Source - - if err := m.Mount(dir); err != nil { - return "", errors.Wrapf(err, "failed to mount in-place: %v", m) - } lm.target = dir return lm.target, nil } @@ -55,10 +62,34 @@ func (lm *localMounter) Unmount() error { lm.mu.Lock() defer lm.mu.Unlock() + // NOTE(gabriel-samfira): Should we just return nil if len(lm.mounts) == 0? 
+ // Calling Mount() would fail on an instance of the localMounter where mounts contains + // anything other than 1 mount. + if len(lm.mounts) != 1 { + return errors.Wrapf(errdefs.ErrNotImplemented, "request to mount %d layers, only 1 is supported", len(lm.mounts)) + } + m := lm.mounts[0] + if lm.target != "" { - if err := mount.Unmount(lm.target, 0); err != nil { - return err + if m.Type == "bind" || m.Type == "rbind" { + if err := bindfilter.RemoveFileBinding(lm.target); err != nil { + // The following two errors denote that lm.target is not a mount point. + if !errors.Is(err, windows.ERROR_INVALID_PARAMETER) && !errors.Is(err, windows.ERROR_NOT_FOUND) { + return errors.Wrapf(err, "failed to unmount %v: %+v", lm.target, err) + } + } + } else { + // The containerd snapshotter uses the bind filter internally to mount windows-layer + // volumes. We use same bind filter here to emulate bind mounts. In theory we could + // simply call mount.Unmount() here, without the extra check for bind mounts and explicit + // call to bindfilter.RemoveFileBinding() (above), but this would operate under the + // assumption that the internal implementation in containerd will always be based on the + // bind filter, which feels brittle. + if err := mount.Unmount(lm.target, 0); err != nil { + return errors.Wrapf(err, "failed to unmount %v: %+v", lm.target, err) + } } + os.RemoveAll(lm.target) lm.target = "" } diff --git a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go index edf95cee70..f5c59f1735 100644 --- a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go +++ b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go @@ -167,6 +167,8 @@ func setRedirectDir(mounts []mount.Mount, redirectDirOption string) (ret []mount return mounts } for _, m := range mounts { + // Replace redirect_dir options, but only for overlay. + // redirect_dir is not supported by fuse-overlayfs. 
if m.Type == "overlay" { var opts []string for _, o := range m.Options { diff --git a/vendor/github.com/moby/buildkit/solver/cachemanager.go b/vendor/github.com/moby/buildkit/solver/cachemanager.go index f8bfbd23dd..5f5d9f33e2 100644 --- a/vendor/github.com/moby/buildkit/solver/cachemanager.go +++ b/vendor/github.com/moby/buildkit/solver/cachemanager.go @@ -25,7 +25,7 @@ func NewCacheManager(ctx context.Context, id string, storage CacheKeyStorage, re results: results, } - if err := cm.ReleaseUnreferenced(); err != nil { + if err := cm.ReleaseUnreferenced(ctx); err != nil { bklog.G(ctx).Errorf("failed to release unreferenced cache metadata: %+v", err) } @@ -40,10 +40,10 @@ type cacheManager struct { results CacheResultStorage } -func (c *cacheManager) ReleaseUnreferenced() error { +func (c *cacheManager) ReleaseUnreferenced(ctx context.Context) error { return c.backend.Walk(func(id string) error { return c.backend.WalkResults(id, func(cr CacheResult) error { - if !c.results.Exists(cr.ID) { + if !c.results.Exists(ctx, cr.ID) { c.backend.Release(cr.ID) } return nil @@ -112,10 +112,10 @@ func (c *cacheManager) Query(deps []CacheKeyWithSelector, input Index, dgst dige return keys, nil } -func (c *cacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { +func (c *cacheManager) Records(ctx context.Context, ck *CacheKey) ([]*CacheRecord, error) { outs := make([]*CacheRecord, 0) if err := c.backend.WalkResults(c.getID(ck), func(r CacheResult) error { - if c.results.Exists(r.ID) { + if c.results.Exists(ctx, r.ID) { outs = append(outs, &CacheRecord{ ID: r.ID, cacheManager: c, @@ -217,6 +217,11 @@ func (c *cacheManager) LoadWithParents(ctx context.Context, rec *CacheRecord) ([ r.Release(context.TODO()) } } + for _, r := range m { + // refs added to results are deleted from m by filterResults + // so release any leftovers + r.Release(context.TODO()) + } return results, nil } diff --git a/vendor/github.com/moby/buildkit/solver/cachestorage.go 
b/vendor/github.com/moby/buildkit/solver/cachestorage.go index 77724ac4c4..7f426fbedf 100644 --- a/vendor/github.com/moby/buildkit/solver/cachestorage.go +++ b/vendor/github.com/moby/buildkit/solver/cachestorage.go @@ -49,5 +49,5 @@ type CacheResultStorage interface { Save(Result, time.Time) (CacheResult, error) Load(ctx context.Context, res CacheResult) (Result, error) LoadRemotes(ctx context.Context, res CacheResult, compression *compression.Config, s session.Group) ([]*Remote, error) - Exists(id string) bool + Exists(ctx context.Context, id string) bool } diff --git a/vendor/github.com/moby/buildkit/solver/combinedcache.go b/vendor/github.com/moby/buildkit/solver/combinedcache.go index 89361bcc04..ffffbb3953 100644 --- a/vendor/github.com/moby/buildkit/solver/combinedcache.go +++ b/vendor/github.com/moby/buildkit/solver/combinedcache.go @@ -100,7 +100,7 @@ func (cm *combinedCacheManager) Save(key *CacheKey, s Result, createdAt time.Tim return cm.main.Save(key, s, createdAt) } -func (cm *combinedCacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { +func (cm *combinedCacheManager) Records(ctx context.Context, ck *CacheKey) ([]*CacheRecord, error) { if len(ck.ids) == 0 { return nil, errors.Errorf("no results") } @@ -112,7 +112,7 @@ func (cm *combinedCacheManager) Records(ck *CacheKey) ([]*CacheRecord, error) { for c := range ck.ids { func(c *cacheManager) { eg.Go(func() error { - recs, err := c.Records(ck) + recs, err := c.Records(ctx, ck) if err != nil { return err } diff --git a/vendor/github.com/moby/buildkit/solver/edge.go b/vendor/github.com/moby/buildkit/solver/edge.go index 5e3068010f..3e4ec18242 100644 --- a/vendor/github.com/moby/buildkit/solver/edge.go +++ b/vendor/github.com/moby/buildkit/solver/edge.go @@ -405,7 +405,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { } else { for _, k := range keys { k.vtx = e.edge.Vertex.Digest() - records, err := e.op.Cache().Records(k) + records, err := 
e.op.Cache().Records(context.Background(), k) if err != nil { bklog.G(context.TODO()).Errorf("error receiving cache records: %v", err) continue @@ -583,7 +583,7 @@ func (e *edge) recalcCurrentState() { } } - records, err := e.op.Cache().Records(mergedKey) + records, err := e.op.Cache().Records(context.Background(), mergedKey) if err != nil { bklog.G(context.TODO()).Errorf("error receiving cache records: %v", err) continue diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go b/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go index e8af9ff233..aed3045bf1 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go @@ -3,7 +3,7 @@ package errdefs import ( fmt "fmt" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/moby/buildkit/util/grpcerrors" ) diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/solve.go b/vendor/github.com/moby/buildkit/solver/errdefs/solve.go index 3cbf8097ee..d7b9e7799a 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/solve.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/solve.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/golang/protobuf/jsonpb" //nolint:staticcheck "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/grpcerrors" diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go b/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go index b30eab3f66..8527f2a791 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go @@ -3,7 +3,7 @@ package errdefs import ( fmt "fmt" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/moby/buildkit/util/grpcerrors" ) diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go 
b/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go index 4ec375165d..5c2e03d133 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go @@ -1,7 +1,7 @@ package errdefs import ( - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/moby/buildkit/util/grpcerrors" digest "github.com/opencontainers/go-digest" ) diff --git a/vendor/github.com/moby/buildkit/solver/jobs.go b/vendor/github.com/moby/buildkit/solver/jobs.go index 27e1534861..ec203257e3 100644 --- a/vendor/github.com/moby/buildkit/solver/jobs.go +++ b/vendor/github.com/moby/buildkit/solver/jobs.go @@ -342,6 +342,13 @@ func (jl *Solver) loadUnlocked(v, parent Vertex, j *Job, cache map[Vertex]Vertex // if same vertex is already loaded without cache just use that st, ok := jl.actives[dgstWithoutCache] + if ok { + // When matching an existing active vertext by dgstWithoutCache, set v to the + // existing active vertex, as otherwise the original vertex will use an + // incorrect digest and can incorrectly delete it while it is still in use. 
+ v = st.vtx + } + if !ok { st, ok = jl.actives[dgst] @@ -533,6 +540,8 @@ func (wp *withProvenance) WalkProvenance(ctx context.Context, f func(ProvenanceP if wp.j == nil { return nil } + wp.j.list.mu.RLock() + defer wp.j.list.mu.RUnlock() m := map[digest.Digest]struct{}{} return wp.j.walkProvenance(ctx, wp.e, f, m) } @@ -652,9 +661,11 @@ type execRes struct { } type sharedOp struct { - resolver ResolveOpFunc - st *state - g flightcontrol.Group + resolver ResolveOpFunc + st *state + gDigest flightcontrol.Group[digest.Digest] + gCacheRes flightcontrol.Group[[]*CacheMap] + gExecRes flightcontrol.Group[*execRes] opOnce sync.Once op Op @@ -679,7 +690,18 @@ func (s *sharedOp) IgnoreCache() bool { } func (s *sharedOp) Cache() CacheManager { - return s.st.combinedCacheManager() + return &cacheWithCacheOpts{s.st.combinedCacheManager(), s.st} +} + +type cacheWithCacheOpts struct { + CacheManager + st *state +} + +func (c cacheWithCacheOpts) Records(ctx context.Context, ck *CacheKey) ([]*CacheRecord, error) { + // Allow Records accessing to cache opts through ctx. This enable to use remote provider + // during checking the cache existence. 
+ return c.CacheManager.Records(withAncestorCacheOpts(ctx, c.st), ck) } func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) (Result, error) { @@ -705,7 +727,7 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF err = errdefs.WrapVertex(err, s.st.origDigest) }() flightControlKey := fmt.Sprintf("slow-compute-%d", index) - key, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (interface{}, error) { + key, err := s.gDigest.Do(ctx, flightControlKey, func(ctx context.Context) (digest.Digest, error) { s.slowMu.Lock() // TODO: add helpers for these stored values if res, ok := s.slowCacheRes[index]; ok { @@ -714,7 +736,7 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF } if err := s.slowCacheErr[index]; err != nil { s.slowMu.Unlock() - return nil, err + return "", err } s.slowMu.Unlock() @@ -722,7 +744,7 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF if p != nil { st := s.st.solver.getState(s.st.vtx.Inputs()[index]) if st == nil { - return nil, errors.Errorf("failed to get state for index %d on %v", index, s.st.vtx.Name()) + return "", errors.Errorf("failed to get state for index %d on %v", index, s.st.vtx.Name()) } ctx2 := progress.WithProgress(ctx, st.mpw) if st.mspan.Span != nil { @@ -773,7 +795,7 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF notifyCompleted(err, false) return "", err } - return key.(digest.Digest), nil + return key, nil } func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, err error) { @@ -786,7 +808,7 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, return nil, err } flightControlKey := fmt.Sprintf("cachemap-%d", index) - res, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (ret interface{}, retErr error) { + res, err := s.gCacheRes.Do(ctx, flightControlKey, func(ctx context.Context) (ret []*CacheMap, 
retErr error) { if s.cacheRes != nil && s.cacheDone || index < len(s.cacheRes) { return s.cacheRes, nil } @@ -842,11 +864,11 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, return nil, err } - if len(res.([]*CacheMap)) <= index { + if len(res) <= index { return s.CacheMap(ctx, index) } - return &cacheMapResp{CacheMap: res.([]*CacheMap)[index], complete: s.cacheDone}, nil + return &cacheMapResp{CacheMap: res[index], complete: s.cacheDone}, nil } func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error) { @@ -859,7 +881,7 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, return nil, nil, err } flightControlKey := "exec" - res, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (ret interface{}, retErr error) { + res, err := s.gExecRes.Do(ctx, flightControlKey, func(ctx context.Context) (ret *execRes, retErr error) { if s.execDone { if s.execErr != nil { return nil, s.execErr @@ -921,8 +943,7 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, if res == nil || err != nil { return nil, nil, err } - r := res.(*execRes) - return unwrapShared(r.execRes), r.execExporters, nil + return unwrapShared(res.execRes), res.execExporters, nil } func (s *sharedOp) getOp() (Op, error) { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go index 185fe81f06..27dc133620 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go @@ -155,7 +155,7 @@ type resultProxy struct { id string b *provenanceBridge req frontend.SolveRequest - g flightcontrol.Group + g flightcontrol.Group[solver.CachedResult] mu sync.Mutex released bool v solver.CachedResult @@ -177,6 +177,9 @@ func (rp *resultProxy) Definition() *pb.Definition { } func (rp *resultProxy) Provenance() 
interface{} { + if rp.provenance == nil { + return nil + } return rp.provenance } @@ -241,7 +244,7 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err defer func() { err = rp.wrapError(err) }() - r, err := rp.g.Do(ctx, "result", func(ctx context.Context) (interface{}, error) { + return rp.g.Do(ctx, "result", func(ctx context.Context) (solver.CachedResult, error) { rp.mu.Lock() if rp.released { rp.mu.Unlock() @@ -270,30 +273,27 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err rp.mu.Unlock() return nil, errors.Errorf("evaluating released result") } - rp.v = v - rp.err = err if err == nil { - capture, err := captureProvenance(ctx, v) - if err != nil && rp.err != nil { - rp.err = errors.Wrapf(rp.err, "failed to capture provenance: %v", err) + var capture *provenance.Capture + capture, err = captureProvenance(ctx, v) + if err != nil { + err = errors.Errorf("failed to capture provenance: %v", err) v.Release(context.TODO()) - rp.v = nil + v = nil } rp.provenance = capture } + rp.v = v + rp.err = err rp.mu.Unlock() return v, err }) - if r != nil { - return r.(solver.CachedResult), nil - } - return nil, err } -func (b *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { +func (b *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (resolvedRef string, dgst digest.Digest, config []byte, err error) { w, err := b.resolveWorker() if err != nil { - return "", nil, err + return "", "", nil, err } if opt.LogName == "" { opt.LogName = fmt.Sprintf("resolve image config for %s", ref) @@ -304,11 +304,18 @@ func (b *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb. 
} else { id += platforms.Format(*platform) } + pol, err := loadSourcePolicy(b.builder) + if err != nil { + return "", "", nil, err + } + if pol != nil { + opt.SourcePolicies = append(opt.SourcePolicies, pol) + } err = inBuilderContext(ctx, b.builder, opt.LogName, id, func(ctx context.Context, g session.Group) error { - dgst, config, err = w.ResolveImageConfig(ctx, ref, opt, b.sm, g) + resolvedRef, dgst, config, err = w.ResolveImageConfig(ctx, ref, opt, b.sm, g) return err }) - return dgst, config, err + return resolvedRef, dgst, config, err } type lazyCacheManager struct { @@ -329,12 +336,12 @@ func (lcm *lazyCacheManager) Query(inp []solver.CacheKeyWithSelector, inputIndex } return lcm.main.Query(inp, inputIndex, dgst, outputIndex) } -func (lcm *lazyCacheManager) Records(ck *solver.CacheKey) ([]*solver.CacheRecord, error) { +func (lcm *lazyCacheManager) Records(ctx context.Context, ck *solver.CacheKey) ([]*solver.CacheRecord, error) { lcm.wait() if lcm.main == nil { return nil, nil } - return lcm.main.Records(ck) + return lcm.main.Records(ctx, ck) } func (lcm *lazyCacheManager) Load(ctx context.Context, rec *solver.CacheRecord) (solver.Result, error) { if err := lcm.wait(); err != nil { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go index 974c2e04e8..6212066cd9 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go @@ -5,6 +5,7 @@ import ( "log" "os" "path/filepath" + "runtime" "strings" "time" @@ -13,6 +14,7 @@ import ( "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/system" "github.com/pkg/errors" copy "github.com/tonistiigi/fsutil/copy" ) @@ -66,7 +68,7 @@ func mapUserToChowner(user *copy.User, idmap *idtools.IdentityMapping) (copy.Cho } func mkdir(ctx 
context.Context, d string, action pb.FileActionMkDir, user *copy.User, idmap *idtools.IdentityMapping) error { - p, err := fs.RootPath(d, filepath.Join("/", action.Path)) + p, err := fs.RootPath(d, action.Path) if err != nil { return err } @@ -126,7 +128,10 @@ func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *cop func rm(ctx context.Context, d string, action pb.FileActionRm) error { if action.AllowWildcard { - src := cleanPath(action.Path) + src, err := cleanPath(action.Path) + if err != nil { + return errors.Wrap(err, "cleaning path") + } m, err := copy.ResolveWildcards(d, src, false) if err != nil { return err @@ -167,9 +172,14 @@ func rmPath(root, src string, allowNotFound bool) error { } func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.User, idmap *idtools.IdentityMapping) error { - srcPath := cleanPath(action.Src) - destPath := cleanPath(action.Dest) - + srcPath, err := cleanPath(action.Src) + if err != nil { + return errors.Wrap(err, "cleaning source path") + } + destPath, err := cleanPath(action.Dest) + if err != nil { + return errors.Wrap(err, "cleaning path") + } if !action.CreateDestPath { p, err := fs.RootPath(dest, filepath.Join("/", action.Dest)) if err != nil { @@ -244,19 +254,6 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u * return nil } -func cleanPath(s string) string { - s2 := filepath.Join("/", s) - if strings.HasSuffix(s, "/.") { - if s2 != "/" { - s2 += "/" - } - s2 += "." 
- } else if strings.HasSuffix(s, "/") && s2 != "/" { - s2 += "/" - } - return s2 -} - type Backend struct { } @@ -349,3 +346,21 @@ func (fb *Backend) Copy(ctx context.Context, m1, m2, user, group fileoptypes.Mou return docopy(ctx, src, dest, action, u, mnt2.m.IdentityMapping()) } + +func cleanPath(s string) (string, error) { + s, err := system.CheckSystemDriveAndRemoveDriveLetter(s, runtime.GOOS) + if err != nil { + return "", errors.Wrap(err, "removing drive letter") + } + s = filepath.FromSlash(s) + s2 := filepath.Join("/", s) + if strings.HasSuffix(s, string(filepath.Separator)+".") { + if s2 != string(filepath.Separator) { + s2 += string(filepath.Separator) + } + s2 += "." + } else if strings.HasSuffix(s, string(filepath.Separator)) && s2 != string(filepath.Separator) { + s2 += string(filepath.Separator) + } + return s2, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/history.go b/vendor/github.com/moby/buildkit/solver/llbsolver/history.go index 09aa19855e..ac0a5dd652 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/history.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/history.go @@ -4,9 +4,12 @@ import ( "bufio" "context" "encoding/binary" + "encoding/json" "io" "os" "sort" + "strconv" + "strings" "sync" "time" @@ -16,6 +19,8 @@ import ( controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client" "github.com/moby/buildkit/cmd/buildkitd/config" + "github.com/moby/buildkit/identity" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/util/leaseutil" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -25,23 +30,27 @@ import ( const ( recordsBucket = "_records" + versionBucket = "_version" ) type HistoryQueueOpt struct { DB *bolt.DB - LeaseManager leases.Manager - ContentStore content.Store + LeaseManager *leaseutil.Manager + ContentStore *containerdsnapshot.Store CleanConfig 
*config.HistoryConfig } type HistoryQueue struct { - mu sync.Mutex - initOnce sync.Once - HistoryQueueOpt - ps *pubsub[*controlapi.BuildHistoryEvent] - active map[string]*controlapi.BuildHistoryRecord - refs map[string]int - deleted map[string]struct{} + // mu protects active, refs and deleted maps + mu sync.Mutex + initOnce sync.Once + opt HistoryQueueOpt + ps *pubsub[*controlapi.BuildHistoryEvent] + active map[string]*controlapi.BuildHistoryRecord + refs map[string]int + deleted map[string]struct{} + hContentStore *containerdsnapshot.Store + hLeaseManager *leaseutil.Manager } type StatusImportResult struct { @@ -51,15 +60,15 @@ type StatusImportResult struct { NumTotalSteps int } -func NewHistoryQueue(opt HistoryQueueOpt) *HistoryQueue { +func NewHistoryQueue(opt HistoryQueueOpt) (*HistoryQueue, error) { if opt.CleanConfig == nil { opt.CleanConfig = &config.HistoryConfig{ - MaxAge: int64((48 * time.Hour).Seconds()), + MaxAge: config.Duration{Duration: 48 * time.Hour}, MaxEntries: 50, } } h := &HistoryQueue{ - HistoryQueueOpt: opt, + opt: opt, ps: &pubsub[*controlapi.BuildHistoryEvent]{ m: map[*channel[*controlapi.BuildHistoryEvent]]struct{}{}, }, @@ -68,6 +77,39 @@ func NewHistoryQueue(opt HistoryQueueOpt) *HistoryQueue { deleted: map[string]struct{}{}, } + ns := h.opt.ContentStore.Namespace() + // double check invalid configuration + ns2 := h.opt.LeaseManager.Namespace() + if ns != ns2 { + return nil, errors.Errorf("invalid configuration: content store namespace %q does not match lease manager namespace %q", ns, ns2) + } + h.hContentStore = h.opt.ContentStore.WithNamespace(ns + "_history") + h.hLeaseManager = h.opt.LeaseManager.WithNamespace(ns + "_history") + + // v2 migration: all records need to be on isolated containerd ns from rest of buildkit + needsMigration := false + if err := h.opt.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(versionBucket)) + if b != nil { + v := b.Get([]byte("version")) + if v != nil { + vi, err := 
strconv.ParseInt(string(v), 10, 64) + if err == nil && vi > 1 { + return nil + } + } + } + needsMigration = true + return nil + }); err != nil { + return nil, err + } + if needsMigration { + if err := h.migrateV2(); err != nil { + return nil, err + } + } + go func() { for { h.gc() @@ -75,13 +117,158 @@ func NewHistoryQueue(opt HistoryQueueOpt) *HistoryQueue { } }() - return h + return h, nil +} + +func (h *HistoryQueue) migrateV2() error { + ctx := context.Background() + + if err := h.opt.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return nil + } + ctx, release, err := leaseutil.WithLease(ctx, h.hLeaseManager, leases.WithID("history_migration_"+identity.NewID()), leaseutil.MakeTemporary) + if err != nil { + return err + } + defer release(ctx) + return b.ForEach(func(key, dt []byte) error { + recs, err := h.opt.LeaseManager.ListResources(ctx, leases.Lease{ID: h.leaseID(string(key))}) + if err != nil { + if errdefs.IsNotFound(err) { + return nil + } + return err + } + recs2 := make([]leases.Resource, 0, len(recs)) + for _, r := range recs { + if r.Type == "content" { + if ok, err := h.migrateBlobV2(ctx, r.ID, false); err != nil { + return err + } else if ok { + recs2 = append(recs2, r) + } + } else { + return errors.Errorf("unknown resource type %q", r.Type) + } + } + + l, err := h.hLeaseManager.Create(ctx, leases.WithID(h.leaseID(string(key)))) + if err != nil { + if !errors.Is(err, errdefs.ErrAlreadyExists) { + return err + } + l = leases.Lease{ID: string(key)} + } + + for _, r := range recs2 { + if err := h.hLeaseManager.AddResource(ctx, l, r); err != nil { + return err + } + } + + return h.opt.LeaseManager.Delete(ctx, leases.Lease{ID: h.leaseID(string(key))}) + }) + }); err != nil { + return err + } + + if err := h.opt.DB.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(versionBucket)) + if err != nil { + return err + } + return b.Put([]byte("version"), []byte("2")) + }); err != nil { + 
return err + } + + return nil +} + +func (h *HistoryQueue) blobRefs(ctx context.Context, dgst digest.Digest, detectSkipLayer bool) ([]digest.Digest, error) { + info, err := h.opt.ContentStore.Info(ctx, dgst) + if err != nil { + return nil, err // allow missing blobs + } + var out []digest.Digest + layers := map[digest.Digest]struct{}{} + if detectSkipLayer { + dt, err := content.ReadBlob(ctx, h.opt.ContentStore, ocispecs.Descriptor{ + Digest: dgst, + }) + if err != nil { + return nil, err + } + var mfst ocispecs.Manifest + if err := json.Unmarshal(dt, &mfst); err != nil { + return nil, err + } + for _, l := range mfst.Layers { + layers[l.Digest] = struct{}{} + } + } + for k, v := range info.Labels { + if !strings.HasPrefix(k, "containerd.io/gc.ref.content.") { + continue + } + dgst, err := digest.Parse(v) + if err != nil { + continue + } + if _, ok := layers[dgst]; ok { + continue + } + out = append(out, dgst) + } + return out, nil +} + +func (h *HistoryQueue) migrateBlobV2(ctx context.Context, id string, detectSkipLayers bool) (bool, error) { + dgst, err := digest.Parse(id) + if err != nil { + return false, err + } + + refs, _ := h.blobRefs(ctx, dgst, detectSkipLayers) // allow missing blobs + labels := map[string]string{} + for i, r := range refs { + labels["containerd.io/gc.ref.content."+strconv.Itoa(i)] = r.String() + } + + w, err := content.OpenWriter(ctx, h.hContentStore, content.WithDescriptor(ocispecs.Descriptor{ + Digest: dgst, + }), content.WithRef("history-migrate-"+id)) + if err != nil { + if errdefs.IsAlreadyExists(err) { + return true, nil + } + return false, err + } + defer w.Close() + ra, err := h.opt.ContentStore.ReaderAt(ctx, ocispecs.Descriptor{ + Digest: dgst, + }) + if err != nil { + return false, nil // allow skipping + } + defer ra.Close() + if err := content.Copy(ctx, w, &reader{ReaderAt: ra}, 0, dgst, content.WithLabels(labels)); err != nil { + return false, err + } + + for _, refs := range refs { + h.migrateBlobV2(ctx, refs.String(), 
detectSkipLayers) // allow missing blobs + } + + return true, nil } func (h *HistoryQueue) gc() error { var records []*controlapi.BuildHistoryRecord - if err := h.DB.View(func(tx *bolt.Tx) error { + if err := h.opt.DB.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return nil @@ -102,7 +289,7 @@ func (h *HistoryQueue) gc() error { } // in order for record to get deleted by gc it exceed both maxentries and maxage criteria - if len(records) < int(h.CleanConfig.MaxEntries) { + if len(records) < int(h.opt.CleanConfig.MaxEntries) { return nil } @@ -115,8 +302,8 @@ func (h *HistoryQueue) gc() error { defer h.mu.Unlock() now := time.Now() - for _, r := range records[h.CleanConfig.MaxEntries:] { - if now.Add(time.Duration(h.CleanConfig.MaxAge) * -time.Second).After(*r.CompletedAt) { + for _, r := range records[h.opt.CleanConfig.MaxEntries:] { + if now.Add(-h.opt.CleanConfig.MaxAge.Duration).After(*r.CompletedAt) { if err := h.delete(r.Ref, false); err != nil { return err } @@ -132,7 +319,11 @@ func (h *HistoryQueue) delete(ref string, sync bool) error { return nil } delete(h.deleted, ref) - if err := h.DB.Update(func(tx *bolt.Tx) error { + h.ps.Send(&controlapi.BuildHistoryEvent{ + Type: controlapi.BuildHistoryEventType_DELETED, + Record: &controlapi.BuildHistoryRecord{Ref: ref}, + }) + if err := h.opt.DB.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return os.ErrNotExist @@ -142,7 +333,7 @@ func (h *HistoryQueue) delete(ref string, sync bool) error { if sync { opts = append(opts, leases.SynchronousDelete) } - err2 := h.LeaseManager.Delete(context.TODO(), leases.Lease{ID: h.leaseID(ref)}, opts...) + err2 := h.hLeaseManager.Delete(context.TODO(), leases.Lease{ID: h.leaseID(ref)}, opts...) 
if err1 != nil { return err1 } @@ -156,7 +347,7 @@ func (h *HistoryQueue) delete(ref string, sync bool) error { func (h *HistoryQueue) init() error { var err error h.initOnce.Do(func() { - err = h.DB.Update(func(tx *bolt.Tx) error { + err = h.opt.DB.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucketIfNotExists([]byte(recordsBucket)) return err }) @@ -168,11 +359,27 @@ func (h *HistoryQueue) leaseID(id string) string { return "ref_" + id } -func (h *HistoryQueue) addResource(ctx context.Context, l leases.Lease, desc *controlapi.Descriptor) error { +func (h *HistoryQueue) addResource(ctx context.Context, l leases.Lease, desc *controlapi.Descriptor, detectSkipLayers bool) error { if desc == nil { return nil } - return h.LeaseManager.AddResource(ctx, l, leases.Resource{ + if _, err := h.hContentStore.Info(ctx, desc.Digest); err != nil { + if errdefs.IsNotFound(err) { + ctx, release, err := leaseutil.WithLease(ctx, h.hLeaseManager, leases.WithID("history_migration_"+identity.NewID()), leaseutil.MakeTemporary) + if err != nil { + return err + } + defer release(ctx) + ok, err := h.migrateBlobV2(ctx, string(desc.Digest), detectSkipLayers) + if err != nil { + return err + } + if !ok { + return errors.Errorf("unknown blob %s in history", desc.Digest) + } + } + } + return h.hLeaseManager.AddResource(ctx, l, leases.Resource{ ID: string(desc.Digest), Type: "content", }) @@ -183,7 +390,7 @@ func (h *HistoryQueue) UpdateRef(ctx context.Context, ref string, upt func(r *co defer h.mu.Unlock() var br controlapi.BuildHistoryRecord - if err := h.DB.View(func(tx *bolt.Tx) error { + if err := h.opt.DB.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return os.ErrNotExist @@ -223,7 +430,7 @@ func (h *HistoryQueue) UpdateRef(ctx context.Context, ref string, upt func(r *co func (h *HistoryQueue) Status(ctx context.Context, ref string, st chan<- *client.SolveStatus) error { h.init() var br controlapi.BuildHistoryRecord - if err := h.DB.View(func(tx 
*bolt.Tx) error { + if err := h.opt.DB.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return os.ErrNotExist @@ -245,7 +452,7 @@ func (h *HistoryQueue) Status(ctx context.Context, ref string, st chan<- *client return nil } - ra, err := h.ContentStore.ReaderAt(ctx, ocispecs.Descriptor{ + ra, err := h.hContentStore.ReaderAt(ctx, ocispecs.Descriptor{ Digest: br.Logs.Digest, Size: br.Logs.Size_, MediaType: br.Logs.MediaType, @@ -286,7 +493,7 @@ func (h *HistoryQueue) Status(ctx context.Context, ref string, st chan<- *client } func (h *HistoryQueue) update(ctx context.Context, rec controlapi.BuildHistoryRecord) error { - return h.DB.Update(func(tx *bolt.Tx) (err error) { + return h.opt.DB.Update(func(tx *bolt.Tx) (err error) { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return nil @@ -296,7 +503,7 @@ func (h *HistoryQueue) update(ctx context.Context, rec controlapi.BuildHistoryRe return err } - l, err := h.LeaseManager.Create(ctx, leases.WithID(h.leaseID(rec.Ref))) + l, err := h.hLeaseManager.Create(ctx, leases.WithID(h.leaseID(rec.Ref))) created := true if err != nil { if !errors.Is(err, errdefs.ErrAlreadyExists) { @@ -308,32 +515,32 @@ func (h *HistoryQueue) update(ctx context.Context, rec controlapi.BuildHistoryRe defer func() { if err != nil && created { - h.LeaseManager.Delete(ctx, l) + h.hLeaseManager.Delete(ctx, l) } }() - if err := h.addResource(ctx, l, rec.Logs); err != nil { + if err := h.addResource(ctx, l, rec.Logs, false); err != nil { return err } - if err := h.addResource(ctx, l, rec.Trace); err != nil { + if err := h.addResource(ctx, l, rec.Trace, false); err != nil { return err } if rec.Result != nil { - if err := h.addResource(ctx, l, rec.Result.Result); err != nil { + if err := h.addResource(ctx, l, rec.Result.Result, true); err != nil { return err } for _, att := range rec.Result.Attestations { - if err := h.addResource(ctx, l, att); err != nil { + if err := h.addResource(ctx, l, att, false); err != nil { 
return err } } } for _, r := range rec.Results { - if err := h.addResource(ctx, l, r.Result); err != nil { + if err := h.addResource(ctx, l, r.Result, true); err != nil { return err } for _, att := range r.Attestations { - if err := h.addResource(ctx, l, att); err != nil { + if err := h.addResource(ctx, l, att, false); err != nil { return err } } @@ -371,27 +578,27 @@ func (h *HistoryQueue) Delete(ctx context.Context, ref string) error { } func (h *HistoryQueue) OpenBlobWriter(ctx context.Context, mt string) (_ *Writer, err error) { - l, err := h.LeaseManager.Create(ctx, leases.WithRandomID(), leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) + l, err := h.hLeaseManager.Create(ctx, leases.WithRandomID(), leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) if err != nil { return nil, err } defer func() { if err != nil { - h.LeaseManager.Delete(ctx, l) + h.hLeaseManager.Delete(ctx, l) } }() ctx = leases.WithLease(ctx, l.ID) - w, err := content.OpenWriter(ctx, h.ContentStore, content.WithRef("history-"+h.leaseID(l.ID))) + w, err := content.OpenWriter(ctx, h.hContentStore, content.WithRef("history-"+h.leaseID(l.ID))) if err != nil { return nil, err } return &Writer{ mt: mt, - lm: h.LeaseManager, + lm: h.hLeaseManager, l: l, w: w, dgstr: digest.Canonical.Digester(), @@ -424,7 +631,7 @@ func (w *Writer) Discard() { func (w *Writer) Commit(ctx context.Context) (*ocispecs.Descriptor, func(), error) { dgst := w.dgstr.Digest() sz := int64(w.sz) - if err := w.w.Commit(ctx, int64(w.sz), dgst); err != nil { + if err := w.w.Commit(leases.WithLease(ctx, w.l.ID), int64(w.sz), dgst); err != nil { if !errdefs.IsAlreadyExists(err) { w.Discard() return nil, nil, err @@ -559,7 +766,10 @@ func (h *HistoryQueue) Listen(ctx context.Context, req *controlapi.BuildHistoryR if req.Ref != "" && e.Ref != req.Ref { continue } - sub.ps.Send(&controlapi.BuildHistoryEvent{ + if _, ok := h.deleted[e.Ref]; ok { + continue + } + sub.send(&controlapi.BuildHistoryEvent{ Type: 
controlapi.BuildHistoryEventType_STARTED, Record: e, }) @@ -568,7 +778,8 @@ func (h *HistoryQueue) Listen(ctx context.Context, req *controlapi.BuildHistoryR h.mu.Unlock() if !req.ActiveOnly { - if err := h.DB.View(func(tx *bolt.Tx) error { + events := []*controlapi.BuildHistoryEvent{} + if err := h.opt.DB.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(recordsBucket)) if b == nil { return nil @@ -581,17 +792,31 @@ func (h *HistoryQueue) Listen(ctx context.Context, req *controlapi.BuildHistoryR if err := br.Unmarshal(dt); err != nil { return errors.Wrapf(err, "failed to unmarshal build record %s", key) } - if err := f(&controlapi.BuildHistoryEvent{ + events = append(events, &controlapi.BuildHistoryEvent{ Record: &br, Type: controlapi.BuildHistoryEventType_COMPLETE, - }); err != nil { - return err - } + }) return nil }) }); err != nil { return err } + // filter out records that have been marked for deletion + h.mu.Lock() + for i, e := range events { + if _, ok := h.deleted[e.Record.Ref]; ok { + events[i] = nil + } + } + h.mu.Unlock() + for _, e := range events { + if e.Record == nil { + continue + } + if err := f(e); err != nil { + return err + } + } } if req.EarlyExit { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go b/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go index 2cfeaae7a2..b61e7e3d1c 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go @@ -45,12 +45,16 @@ type MountManager struct { } func (mm *MountManager) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt, s session.Group) (mref cache.MutableRef, err error) { + name := fmt.Sprintf("cached mount %s from %s", m.Dest, mm.managerName) + if id != m.Dest { + name += fmt.Sprintf(" with id %q", id) + } g := &cacheRefGetter{ locker: &mm.cacheMountsMu, cacheMounts: mm.cacheMounts, cm: mm.cm, globalCacheRefs: 
sharedCacheRefs, - name: fmt.Sprintf("cached mount %s from %s", m.Dest, mm.managerName), + name: name, session: s, } return g.getRefCacheDir(ctx, ref, id, sharing) @@ -75,19 +79,19 @@ func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.Immutable defer mu.Unlock() if ref, ok := g.cacheMounts[key]; ok { - return ref.clone(), nil + return ref.clone(ctx), nil } defer func() { if err == nil { share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}} g.cacheMounts[key] = share - mref = share.clone() + mref = share.clone(ctx) } }() switch sharing { case pb.CacheSharingOpt_SHARED: - return g.globalCacheRefs.get(key, func() (cache.MutableRef, error) { + return g.globalCacheRefs.get(ctx, key, func() (cache.MutableRef, error) { return g.getRefCacheDirNoCache(ctx, key, ref, id, false) }) case pb.CacheSharingOpt_PRIVATE: @@ -101,7 +105,12 @@ func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.Immutable func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, block bool) (cache.MutableRef, error) { makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) { - return g.cm.New(ctx, ref, g.session, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain) + newRef, err := g.cm.New(ctx, ref, g.session, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain) + if err != nil { + return nil, err + } + bklog.G(ctx).Debugf("created new ref for cache dir %q: %s", id, newRef.ID()) + return newRef, nil } cacheRefsLocker.Lock(key) @@ -114,10 +123,12 @@ func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, locked := false for _, si := range sis { if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil { - bklog.G(ctx).Debugf("reusing ref for cache dir: %s", mRef.ID()) + bklog.G(ctx).Debugf("reusing ref for cache dir %q: %s", id, 
mRef.ID()) return mRef, nil } else if errors.Is(err, cache.ErrLocked) { locked = true + } else { + bklog.G(ctx).WithError(err).Errorf("failed to get reuse ref for cache dir %q: %s", id, si.ID()) } } if block && locked { @@ -438,7 +449,7 @@ func CacheMountsLocker() sync.Locker { return &sharedCacheRefs.mu } -func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) { +func (r *cacheRefs) get(ctx context.Context, key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) { r.mu.Lock() defer r.mu.Unlock() @@ -448,7 +459,7 @@ func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache. share, ok := r.shares[key] if ok { - return share.clone(), nil + return share.clone(ctx), nil } mref, err := fn() @@ -458,7 +469,7 @@ func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache. share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}} r.shares[key] = share - return share.clone(), nil + return share.clone(ctx), nil } type cacheRefShare struct { @@ -469,7 +480,11 @@ type cacheRefShare struct { key string } -func (r *cacheRefShare) clone() cache.MutableRef { +func (r *cacheRefShare) clone(ctx context.Context) cache.MutableRef { + bklog.G(ctx).WithFields(map[string]any{ + "key": r.key, + "stack": bklog.LazyStackTrace{}, + }).Trace("cloning cache mount ref share") cacheRef := &cacheRef{cacheRefShare: r} if cacheRefCloneHijack != nil { cacheRefCloneHijack() @@ -481,6 +496,10 @@ func (r *cacheRefShare) clone() cache.MutableRef { } func (r *cacheRefShare) release(ctx context.Context) error { + bklog.G(ctx).WithFields(map[string]any{ + "key": r.key, + "stack": bklog.LazyStackTrace{}, + }).Trace("releasing cache mount ref share main") if r.main != nil { delete(r.main.shares, r.key) } @@ -495,6 +514,10 @@ type cacheRef struct { } func (r *cacheRef) Release(ctx context.Context) error { + bklog.G(ctx).WithFields(map[string]any{ + "key": r.key, + "stack": 
bklog.LazyStackTrace{}, + }).Trace("releasing cache mount ref share") if r.main != nil { r.main.mu.Lock() defer r.main.mu.Unlock() diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go index 2bee1283b4..eee0dd39fb 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go @@ -7,13 +7,15 @@ import ( "fmt" "os" "path" + "runtime" "sort" "strings" "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/frontend/gateway" + resourcestypes "github.com/moby/buildkit/executor/resources/types" + "github.com/moby/buildkit/frontend/gateway/container" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/secrets" "github.com/moby/buildkit/solver" @@ -43,6 +45,8 @@ type ExecOp struct { platform *pb.Platform numInputs int parallelism *semaphore.Weighted + rec resourcestypes.Recorder + digest digest.Digest } var _ solver.Op = &ExecOp{} @@ -62,9 +66,14 @@ func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache. 
w: w, platform: platform, parallelism: parallelism, + digest: v.Digest(), }, nil } +func (e *ExecOp) Digest() digest.Digest { + return e.digest +} + func (e *ExecOp) Proto() *pb.ExecOp { return e.op } @@ -252,10 +261,14 @@ func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu } } - p, err := gateway.PrepareMounts(ctx, e.mm, e.cm, g, e.op.Meta.Cwd, e.op.Mounts, refs, func(m *pb.Mount, ref cache.ImmutableRef) (cache.MutableRef, error) { + platformOS := runtime.GOOS + if e.platform != nil { + platformOS = e.platform.OS + } + p, err := container.PrepareMounts(ctx, e.mm, e.cm, g, e.op.Meta.Cwd, e.op.Mounts, refs, func(m *pb.Mount, ref cache.ImmutableRef) (cache.MutableRef, error) { desc := fmt.Sprintf("mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")) return e.cm.New(ctx, ref, g, cache.WithDescription(desc)) - }) + }, platformOS) defer func() { if err != nil { execInputs := make([]solver.Result, len(e.op.Mounts)) @@ -299,7 +312,7 @@ func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu return nil, err } - extraHosts, err := gateway.ParseExtraHosts(e.op.Meta.ExtraHosts) + extraHosts, err := container.ParseExtraHosts(e.op.Meta.ExtraHosts) if err != nil { return nil, err } @@ -357,7 +370,7 @@ func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu } }() - execErr := e.exec.Run(ctx, "", p.Root, p.Mounts, executor.ProcessInfo{ + rec, execErr := e.exec.Run(ctx, "", p.Root, p.Mounts, executor.ProcessInfo{ Meta: meta, Stdin: nil, Stdout: stdout, @@ -377,6 +390,7 @@ func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu // Prevent the result from being released. 
p.OutputRefs[i].Ref = nil } + e.rec = rec return results, errors.Wrapf(execErr, "process %q did not complete successfully", strings.Join(e.op.Meta.Args, " ")) } @@ -446,3 +460,10 @@ func (e *ExecOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, func (e *ExecOp) IsProvenanceProvider() { } + +func (e *ExecOp) Samples() (*resourcestypes.Samples, error) { + if e.rec == nil { + return nil, nil + } + return e.rec.Samples() +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go index 4f80ddfb65..db81201f1a 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go @@ -296,7 +296,7 @@ type FileOpSolver struct { mu sync.Mutex outs map[int]int ins map[int]input - g flightcontrol.Group + g flightcontrol.Group[input] } type input struct { @@ -405,7 +405,7 @@ func (s *FileOpSolver) validate(idx int, inputs []fileoptypes.Ref, actions []*pb } func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction, g session.Group) (input, error) { - inp, err := s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (_ interface{}, err error) { + return s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (_ input, err error) { s.mu.Lock() inp := s.ins[idx] s.mu.Unlock() @@ -547,17 +547,17 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp eg.Go(loadInput(ctx)) eg.Go(loadSecondaryInput(ctx)) if err := eg.Wait(); err != nil { - return nil, err + return input{}, err } } else { if action.Input != -1 { if err := loadInput(ctx)(); err != nil { - return nil, err + return input{}, err } } if action.SecondaryInput != -1 { if err := loadSecondaryInput(ctx)(); err != nil { - return nil, err + return input{}, err } } } @@ -565,7 +565,7 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs 
[]fileoptyp if inpMount == nil { m, err := s.r.Prepare(ctx, nil, false, g) if err != nil { - return nil, err + return input{}, err } inpMount = m } @@ -574,46 +574,46 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp case *pb.FileAction_Mkdir: user, group, err := loadOwner(ctx, a.Mkdir.Owner) if err != nil { - return nil, err + return input{}, err } if err := s.b.Mkdir(ctx, inpMount, user, group, *a.Mkdir); err != nil { - return nil, err + return input{}, err } case *pb.FileAction_Mkfile: user, group, err := loadOwner(ctx, a.Mkfile.Owner) if err != nil { - return nil, err + return input{}, err } if err := s.b.Mkfile(ctx, inpMount, user, group, *a.Mkfile); err != nil { - return nil, err + return input{}, err } case *pb.FileAction_Rm: if err := s.b.Rm(ctx, inpMount, *a.Rm); err != nil { - return nil, err + return input{}, err } case *pb.FileAction_Copy: if inpMountSecondary == nil { m, err := s.r.Prepare(ctx, nil, true, g) if err != nil { - return nil, err + return input{}, err } inpMountSecondary = m } user, group, err := loadOwner(ctx, a.Copy.Owner) if err != nil { - return nil, err + return input{}, err } if err := s.b.Copy(ctx, inpMountSecondary, inpMount, user, group, *a.Copy); err != nil { - return nil, err + return input{}, err } default: - return nil, errors.Errorf("invalid action type %T", action.Action) + return input{}, errors.Errorf("invalid action type %T", action.Action) } if inp.requiresCommit { ref, err := s.r.Commit(ctx, inpMount) if err != nil { - return nil, err + return input{}, err } inp.ref = ref } else { @@ -624,10 +624,6 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp s.mu.Unlock() return inp, nil }) - if err != nil { - return input{}, err - } - return inp.(input), err } func isDefaultIndexes(idxs [][]int) bool { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go index 
1af3af1960..ee29cceb05 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go @@ -6,6 +6,7 @@ import ( "strconv" slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + "github.com/moby/buildkit/executor/resources" "github.com/moby/buildkit/exporter/containerimage/exptypes" gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/solver" @@ -15,7 +16,7 @@ import ( ) func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { - return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { + return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job, usage *resources.SysSampler) (*llbsolver.Result, error) { ps, err := exptypes.ParsePlatforms(res.Metadata) if err != nil { return nil, err @@ -41,7 +42,7 @@ func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { return nil, errors.Errorf("could not find ref %s", p.ID) } - pc, err := llbsolver.NewProvenanceCreator(ctx, cp, ref, attrs, j) + pc, err := llbsolver.NewProvenanceCreator(ctx, cp, ref, attrs, j, usage) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go index 0a99163a3a..20cdc71dae 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go @@ -4,6 +4,7 @@ import ( "context" "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor/resources" "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/frontend/attestations/sbom" @@ -13,8 +14,8 @@ import ( "github.com/pkg/errors" ) -func SBOMProcessor(scannerRef string, useCache bool) llbsolver.Processor { - return func(ctx 
context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { +func SBOMProcessor(scannerRef string, useCache bool, resolveMode string) llbsolver.Processor { + return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job, usage *resources.SysSampler) (*llbsolver.Result, error) { // skip sbom generation if we already have an sbom if sbom.HasSBOM(res.Result) { return res, nil @@ -25,7 +26,9 @@ func SBOMProcessor(scannerRef string, useCache bool) llbsolver.Processor { return nil, err } - scanner, err := sbom.CreateSBOMScanner(ctx, s.Bridge(j), scannerRef) + scanner, err := sbom.CreateSBOMScanner(ctx, s.Bridge(j), scannerRef, llb.ResolveImageConfigOpt{ + ResolveMode: resolveMode, + }) if err != nil { return nil, err } @@ -56,13 +59,13 @@ func SBOMProcessor(scannerRef string, useCache bool) llbsolver.Processor { if err != nil { return nil, err } - attSolve, err := result.ConvertAttestation(&att, func(st llb.State) (solver.ResultProxy, error) { + attSolve, err := result.ConvertAttestation(&att, func(st *llb.State) (solver.ResultProxy, error) { def, err := st.Marshal(ctx) if err != nil { return nil, err } - r, err := s.Bridge(j).Solve(ctx, frontend.SolveRequest{ // TODO: buildinfo + r, err := s.Bridge(j).Solve(ctx, frontend.SolveRequest{ Definition: def.ToPB(), }, j.SessionID) if err != nil { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go index b30581c852..9138d6d9f8 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go @@ -12,6 +12,7 @@ import ( "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor/resources" "github.com/moby/buildkit/exporter/containerimage" "github.com/moby/buildkit/exporter/containerimage/exptypes" 
"github.com/moby/buildkit/frontend" @@ -130,18 +131,19 @@ func (b *provenanceBridge) findByResult(rp solver.ResultProxy) (*resultWithBridg return nil, false } -func (b *provenanceBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { - dgst, config, err = b.llbBridge.ResolveImageConfig(ctx, ref, opt) +func (b *provenanceBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (resolvedRef string, dgst digest.Digest, config []byte, err error) { + ref, dgst, config, err = b.llbBridge.ResolveImageConfig(ctx, ref, opt) if err != nil { - return "", nil, err + return "", "", nil, err } b.images = append(b.images, provenance.ImageSource{ Ref: ref, Platform: opt.Platform, Digest: dgst, + Local: opt.ResolverType == llb.ResolverTypeOCILayout, }) - return dgst, config, nil + return ref, dgst, config, nil } func (b *provenanceBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid string) (res *frontend.Result, err error) { @@ -322,10 +324,11 @@ func captureProvenance(ctx context.Context, res solver.CachedResultWithProvenanc if err != nil { return errors.Wrapf(err, "failed to parse OCI digest %s", pin) } - c.AddLocalImage(provenance.ImageSource{ + c.AddImage(provenance.ImageSource{ Ref: s.Reference.String(), Platform: s.Platform, Digest: dgst, + Local: true, }) default: return errors.Errorf("unknown source identifier %T", id) @@ -355,6 +358,13 @@ func captureProvenance(ctx context.Context, res solver.CachedResultWithProvenanc if pr.Network != pb.NetMode_NONE { c.NetworkAccess = true } + samples, err := op.Samples() + if err != nil { + return err + } + if samples != nil { + c.AddSamples(op.Digest(), samples) + } case *ops.BuildOp: c.IncompleteMaterials = true // not supported yet } @@ -369,10 +379,11 @@ func captureProvenance(ctx context.Context, res solver.CachedResultWithProvenanc type ProvenanceCreator struct { pr *provenance.ProvenancePredicate j 
*solver.Job + sampler *resources.SysSampler addLayers func() error } -func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solver.ResultProxy, attrs map[string]string, j *solver.Job) (*ProvenanceCreator, error) { +func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solver.ResultProxy, attrs map[string]string, j *solver.Job, usage *resources.SysSampler) (*ProvenanceCreator, error) { var reproducible bool if v, ok := attrs["reproducible"]; ok { b, err := strconv.ParseBool(v) @@ -394,6 +405,12 @@ func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solve } } + withUsage := false + if v, ok := attrs["capture-usage"]; ok { + b, err := strconv.ParseBool(v) + withUsage = err == nil && b + } + pr, err := provenance.NewPredicate(cp) if err != nil { return nil, err @@ -423,7 +440,7 @@ func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solve pr.Invocation.Parameters.Secrets = nil pr.Invocation.Parameters.SSH = nil case "max": - dgsts, err := provenance.AddBuildConfig(ctx, pr, res) + dgsts, err := AddBuildConfig(ctx, pr, cp, res, withUsage) if err != nil { return nil, err } @@ -478,11 +495,15 @@ func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solve return nil, errors.Errorf("invalid mode %q", mode) } - return &ProvenanceCreator{ + pc := &ProvenanceCreator{ pr: pr, j: j, addLayers: addLayers, - }, nil + } + if withUsage { + pc.sampler = usage + } + return pc, nil } func (p *ProvenanceCreator) Predicate() (*provenance.ProvenancePredicate, error) { @@ -495,6 +516,14 @@ func (p *ProvenanceCreator) Predicate() (*provenance.ProvenancePredicate, error) } } + if p.sampler != nil { + sysSamples, err := p.sampler.Close(true) + if err != nil { + return nil, err + } + p.pr.Metadata.BuildKitMetadata.SysUsage = sysSamples + } + return p.pr, nil } @@ -569,3 +598,161 @@ func resolveRemotes(ctx context.Context, res solver.Result) ([]*solver.Remote, e } return remotes, nil } + 
+func AddBuildConfig(ctx context.Context, p *provenance.ProvenancePredicate, c *provenance.Capture, rp solver.ResultProxy, withUsage bool) (map[digest.Digest]int, error) { + def := rp.Definition() + steps, indexes, err := toBuildSteps(def, c, withUsage) + if err != nil { + return nil, err + } + + bc := &provenance.BuildConfig{ + Definition: steps, + DigestMapping: digestMap(indexes), + } + + p.BuildConfig = bc + + if def.Source != nil { + sis := make([]provenance.SourceInfo, len(def.Source.Infos)) + for i, si := range def.Source.Infos { + steps, indexes, err := toBuildSteps(si.Definition, c, withUsage) + if err != nil { + return nil, err + } + s := provenance.SourceInfo{ + Filename: si.Filename, + Data: si.Data, + Language: si.Language, + Definition: steps, + DigestMapping: digestMap(indexes), + } + sis[i] = s + } + + if len(def.Source.Infos) != 0 { + locs := map[string]*pb.Locations{} + for k, l := range def.Source.Locations { + idx, ok := indexes[digest.Digest(k)] + if !ok { + continue + } + locs[fmt.Sprintf("step%d", idx)] = l + } + + if p.Metadata == nil { + p.Metadata = &provenance.ProvenanceMetadata{} + } + p.Metadata.BuildKitMetadata.Source = &provenance.Source{ + Infos: sis, + Locations: locs, + } + } + } + + return indexes, nil +} + +func digestMap(idx map[digest.Digest]int) map[digest.Digest]string { + m := map[digest.Digest]string{} + for k, v := range idx { + m[k] = fmt.Sprintf("step%d", v) + } + return m +} + +func toBuildSteps(def *pb.Definition, c *provenance.Capture, withUsage bool) ([]provenance.BuildStep, map[digest.Digest]int, error) { + if def == nil || len(def.Def) == 0 { + return nil, nil, nil + } + + ops := make(map[digest.Digest]*pb.Op) + defs := make(map[digest.Digest][]byte) + + var dgst digest.Digest + for _, dt := range def.Def { + var op pb.Op + if err := (&op).Unmarshal(dt); err != nil { + return nil, nil, errors.Wrap(err, "failed to parse llb proto op") + } + if src := op.GetSource(); src != nil { + for k := range src.Attrs { + if k 
== "local.session" || k == "local.unique" { + delete(src.Attrs, k) + } + } + } + dgst = digest.FromBytes(dt) + ops[dgst] = &op + defs[dgst] = dt + } + + if dgst == "" { + return nil, nil, nil + } + + // depth first backwards + dgsts := make([]digest.Digest, 0, len(def.Def)) + op := ops[dgst] + + if op.Op != nil { + return nil, nil, errors.Errorf("invalid last vertex: %T", op.Op) + } + + if len(op.Inputs) != 1 { + return nil, nil, errors.Errorf("invalid last vertex inputs: %v", len(op.Inputs)) + } + + visited := map[digest.Digest]struct{}{} + dgsts, err := walkDigests(dgsts, ops, dgst, visited) + if err != nil { + return nil, nil, err + } + indexes := map[digest.Digest]int{} + for i, dgst := range dgsts { + indexes[dgst] = i + } + + out := make([]provenance.BuildStep, 0, len(dgsts)) + for i, dgst := range dgsts { + op := *ops[dgst] + inputs := make([]string, len(op.Inputs)) + for i, inp := range op.Inputs { + inputs[i] = fmt.Sprintf("step%d:%d", indexes[inp.Digest], inp.Index) + } + op.Inputs = nil + s := provenance.BuildStep{ + ID: fmt.Sprintf("step%d", i), + Inputs: inputs, + Op: op, + } + if withUsage { + s.ResourceUsage = c.Samples[dgst] + } + out = append(out, s) + } + return out, indexes, nil +} + +func walkDigests(dgsts []digest.Digest, ops map[digest.Digest]*pb.Op, dgst digest.Digest, visited map[digest.Digest]struct{}) ([]digest.Digest, error) { + if _, ok := visited[dgst]; ok { + return dgsts, nil + } + op, ok := ops[dgst] + if !ok { + return nil, errors.Errorf("failed to find input %v", dgst) + } + if op == nil { + return nil, errors.Errorf("invalid nil input %v", dgst) + } + visited[dgst] = struct{}{} + for _, inp := range op.Inputs { + var err error + dgsts, err = walkDigests(dgsts, ops, inp.Digest, visited) + if err != nil { + return nil, err + } + } + dgsts = append(dgsts, dgst) + return dgsts, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go 
b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go index 4d9bf85ec1..8f903585be 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go @@ -1,13 +1,9 @@ package provenance import ( - "context" - "fmt" - - "github.com/moby/buildkit/solver" + resourcestypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" ) type BuildConfig struct { @@ -16,9 +12,10 @@ type BuildConfig struct { } type BuildStep struct { - ID string `json:"id,omitempty"` - Op interface{} `json:"op,omitempty"` - Inputs []string `json:"inputs,omitempty"` + ID string `json:"id,omitempty"` + Op interface{} `json:"op,omitempty"` + Inputs []string `json:"inputs,omitempty"` + ResourceUsage *resourcestypes.Samples `json:"resourceUsage,omitempty"` } type Source struct { @@ -28,160 +25,8 @@ type Source struct { type SourceInfo struct { Filename string `json:"filename,omitempty"` + Language string `json:"language,omitempty"` Data []byte `json:"data,omitempty"` Definition []BuildStep `json:"llbDefinition,omitempty"` DigestMapping map[digest.Digest]string `json:"digestMapping,omitempty"` } - -func digestMap(idx map[digest.Digest]int) map[digest.Digest]string { - m := map[digest.Digest]string{} - for k, v := range idx { - m[k] = fmt.Sprintf("step%d", v) - } - return m -} - -func AddBuildConfig(ctx context.Context, p *ProvenancePredicate, rp solver.ResultProxy) (map[digest.Digest]int, error) { - def := rp.Definition() - steps, indexes, err := toBuildSteps(def) - if err != nil { - return nil, err - } - - bc := &BuildConfig{ - Definition: steps, - DigestMapping: digestMap(indexes), - } - - p.BuildConfig = bc - - if def.Source != nil { - sis := make([]SourceInfo, len(def.Source.Infos)) - for i, si := range def.Source.Infos { - steps, indexes, err := 
toBuildSteps(si.Definition) - if err != nil { - return nil, err - } - s := SourceInfo{ - Filename: si.Filename, - Data: si.Data, - Definition: steps, - DigestMapping: digestMap(indexes), - } - sis[i] = s - } - - if len(def.Source.Infos) != 0 { - locs := map[string]*pb.Locations{} - for k, l := range def.Source.Locations { - idx, ok := indexes[digest.Digest(k)] - if !ok { - continue - } - locs[fmt.Sprintf("step%d", idx)] = l - } - - if p.Metadata == nil { - p.Metadata = &ProvenanceMetadata{} - } - p.Metadata.BuildKitMetadata.Source = &Source{ - Infos: sis, - Locations: locs, - } - } - } - - return indexes, nil -} - -func toBuildSteps(def *pb.Definition) ([]BuildStep, map[digest.Digest]int, error) { - if def == nil || len(def.Def) == 0 { - return nil, nil, nil - } - - ops := make(map[digest.Digest]*pb.Op) - defs := make(map[digest.Digest][]byte) - - var dgst digest.Digest - for _, dt := range def.Def { - var op pb.Op - if err := (&op).Unmarshal(dt); err != nil { - return nil, nil, errors.Wrap(err, "failed to parse llb proto op") - } - if src := op.GetSource(); src != nil { - for k := range src.Attrs { - if k == "local.session" || k == "local.unique" { - delete(src.Attrs, k) - } - } - } - dgst = digest.FromBytes(dt) - ops[dgst] = &op - defs[dgst] = dt - } - - if dgst == "" { - return nil, nil, nil - } - - // depth first backwards - dgsts := make([]digest.Digest, 0, len(def.Def)) - op := ops[dgst] - - if op.Op != nil { - return nil, nil, errors.Errorf("invalid last vertex: %T", op.Op) - } - - if len(op.Inputs) != 1 { - return nil, nil, errors.Errorf("invalid last vertex inputs: %v", len(op.Inputs)) - } - - visited := map[digest.Digest]struct{}{} - dgsts, err := walkDigests(dgsts, ops, dgst, visited) - if err != nil { - return nil, nil, err - } - indexes := map[digest.Digest]int{} - for i, dgst := range dgsts { - indexes[dgst] = i - } - - out := make([]BuildStep, 0, len(dgsts)) - for i, dgst := range dgsts { - op := *ops[dgst] - inputs := make([]string, len(op.Inputs)) 
- for i, inp := range op.Inputs { - inputs[i] = fmt.Sprintf("step%d:%d", indexes[inp.Digest], inp.Index) - } - op.Inputs = nil - out = append(out, BuildStep{ - ID: fmt.Sprintf("step%d", i), - Inputs: inputs, - Op: op, - }) - } - return out, indexes, nil -} - -func walkDigests(dgsts []digest.Digest, ops map[digest.Digest]*pb.Op, dgst digest.Digest, visited map[digest.Digest]struct{}) ([]digest.Digest, error) { - if _, ok := visited[dgst]; ok { - return dgsts, nil - } - op, ok := ops[dgst] - if !ok { - return nil, errors.Errorf("failed to find input %v", dgst) - } - if op == nil { - return nil, errors.Errorf("invalid nil input %v", dgst) - } - visited[dgst] = struct{}{} - for _, inp := range op.Inputs { - var err error - dgsts, err = walkDigests(dgsts, ops, inp.Digest, visited) - if err != nil { - return nil, err - } - } - dgsts = append(dgsts, dgst) - return dgsts, nil -} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go index 6252ebc3cf..f4d43fba4c 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go @@ -4,6 +4,7 @@ import ( "sort" distreference "github.com/docker/distribution/reference" + resourcestypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/solver/result" "github.com/moby/buildkit/util/urlutil" digest "github.com/opencontainers/go-digest" @@ -16,6 +17,7 @@ type ImageSource struct { Ref string Platform *ocispecs.Platform Digest digest.Digest + Local bool } type GitSource struct { @@ -43,11 +45,10 @@ type SSH struct { } type Sources struct { - Images []ImageSource - LocalImages []ImageSource - Git []GitSource - HTTP []HTTPSource - Local []LocalSource + Images []ImageSource + Git []GitSource + HTTP []HTTPSource + Local []LocalSource } type Capture struct { @@ -58,6 +59,7 @@ type Capture struct { SSH []SSH 
NetworkAccess bool IncompleteMaterials bool + Samples map[digest.Digest]*resourcestypes.Samples } func (c *Capture) Merge(c2 *Capture) error { @@ -67,9 +69,6 @@ func (c *Capture) Merge(c2 *Capture) error { for _, i := range c2.Sources.Images { c.AddImage(i) } - for _, i := range c2.Sources.LocalImages { - c.AddLocalImage(i) - } for _, l := range c2.Sources.Local { c.AddLocal(l) } @@ -98,9 +97,6 @@ func (c *Capture) Sort() { sort.Slice(c.Sources.Images, func(i, j int) bool { return c.Sources.Images[i].Ref < c.Sources.Images[j].Ref }) - sort.Slice(c.Sources.LocalImages, func(i, j int) bool { - return c.Sources.LocalImages[i].Ref < c.Sources.LocalImages[j].Ref - }) sort.Slice(c.Sources.Local, func(i, j int) bool { return c.Sources.Local[i].Name < c.Sources.Local[j].Name }) @@ -151,7 +147,7 @@ func (c *Capture) OptimizeImageSources() error { func (c *Capture) AddImage(i ImageSource) { for _, v := range c.Sources.Images { - if v.Ref == i.Ref { + if v.Ref == i.Ref && v.Local == i.Local { if v.Platform == i.Platform { return } @@ -165,22 +161,6 @@ func (c *Capture) AddImage(i ImageSource) { c.Sources.Images = append(c.Sources.Images, i) } -func (c *Capture) AddLocalImage(i ImageSource) { - for _, v := range c.Sources.LocalImages { - if v.Ref == i.Ref { - if v.Platform == i.Platform { - return - } - if v.Platform != nil && i.Platform != nil { - if v.Platform.Architecture == i.Platform.Architecture && v.Platform.OS == i.Platform.OS && v.Platform.Variant == i.Platform.Variant { - return - } - } - } - } - c.Sources.LocalImages = append(c.Sources.LocalImages, i) -} - func (c *Capture) AddLocal(l LocalSource) { for _, v := range c.Sources.Local { if v.Name == l.Name { @@ -237,6 +217,13 @@ func (c *Capture) AddSSH(s SSH) { c.SSH = append(c.SSH, s) } +func (c *Capture) AddSamples(dgst digest.Digest, samples *resourcestypes.Samples) { + if c.Samples == nil { + c.Samples = map[digest.Digest]*resourcestypes.Samples{} + } + c.Samples[dgst] = samples +} + func parseRefName(s string) 
(distreference.Named, string, error) { ref, err := distreference.ParseNormalizedNamed(s) if err != nil { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go index f2f7c4e2ad..f07ce879d7 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go @@ -6,6 +6,7 @@ import ( "github.com/containerd/containerd/platforms" slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + resourcetypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/util/purl" "github.com/moby/buildkit/util/urlutil" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -50,17 +51,24 @@ type ProvenanceMetadata struct { } type BuildKitMetadata struct { - VCS map[string]string `json:"vcs,omitempty"` - Source *Source `json:"source,omitempty"` - Layers map[string][][]ocispecs.Descriptor `json:"layers,omitempty"` + VCS map[string]string `json:"vcs,omitempty"` + Source *Source `json:"source,omitempty"` + Layers map[string][][]ocispecs.Descriptor `json:"layers,omitempty"` + SysUsage []*resourcetypes.SysSample `json:"sysUsage,omitempty"` } func slsaMaterials(srcs Sources) ([]slsa.ProvenanceMaterial, error) { - count := len(srcs.Images) + len(srcs.Git) + len(srcs.HTTP) + len(srcs.LocalImages) + count := len(srcs.Images) + len(srcs.Git) + len(srcs.HTTP) out := make([]slsa.ProvenanceMaterial, 0, count) for _, s := range srcs.Images { - uri, err := purl.RefToPURL(s.Ref, s.Platform) + var uri string + var err error + if s.Local { + uri, err = purl.RefToPURL(packageurl.TypeOCI, s.Ref, s.Platform) + } else { + uri, err = purl.RefToPURL(packageurl.TypeDocker, s.Ref, s.Platform) + } if err != nil { return nil, err } @@ -93,26 +101,6 @@ func slsaMaterials(srcs Sources) 
([]slsa.ProvenanceMaterial, error) { }) } - for _, s := range srcs.LocalImages { - q := []packageurl.Qualifier{} - if s.Platform != nil { - q = append(q, packageurl.Qualifier{ - Key: "platform", - Value: platforms.Format(*s.Platform), - }) - } - packageurl.NewPackageURL(packageurl.TypeOCI, "", s.Ref, "", q, "") - - material := slsa.ProvenanceMaterial{ - URI: s.Ref, - } - if s.Digest != "" { - material.Digest = slsa.DigestSet{ - s.Digest.Algorithm().String(): s.Digest.Hex(), - } - } - out = append(out, material) - } return out, nil } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go index d65a9e6490..9295e08c63 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go @@ -2,7 +2,6 @@ package llbsolver import ( "context" - "encoding/base64" "encoding/json" "fmt" "os" @@ -10,6 +9,7 @@ import ( "sync" "time" + intoto "github.com/in-toto/in-toto-golang/in_toto" slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/cache" @@ -17,6 +17,8 @@ import ( "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client" controlgateway "github.com/moby/buildkit/control/gateway" + "github.com/moby/buildkit/executor/resources" + resourcetypes "github.com/moby/buildkit/executor/resources/types" "github.com/moby/buildkit/exporter" "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" @@ -27,8 +29,7 @@ import ( "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/solver/result" spb "github.com/moby/buildkit/sourcepolicy/pb" - "github.com/moby/buildkit/util/attestation" - "github.com/moby/buildkit/util/buildinfo" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/entitlements" 
"github.com/moby/buildkit/util/grpcerrors" @@ -37,7 +38,6 @@ import ( "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "go.opentelemetry.io/otel/sdk/trace/tracetest" "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" @@ -76,6 +76,7 @@ type Opt struct { SessionManager *session.Manager WorkerController *worker.Controller HistoryQueue *HistoryQueue + ResourceMonitor *resources.Monitor } type Solver struct { @@ -89,11 +90,12 @@ type Solver struct { sm *session.Manager entitlements []string history *HistoryQueue + sysSampler *resources.Sampler[*resourcetypes.SysSample] } // Processor defines a processing function to be applied after solving, but // before exporting -type Processor func(ctx context.Context, result *Result, s *Solver, j *solver.Job) (*Result, error) +type Processor func(ctx context.Context, result *Result, s *Solver, j *solver.Job, usage *resources.SysSampler) (*Result, error) func New(opt Opt) (*Solver, error) { s := &Solver{ @@ -108,6 +110,12 @@ func New(opt Opt) (*Solver, error) { history: opt.HistoryQueue, } + sampler, err := resources.NewSysSampler() + if err != nil { + return nil, err + } + s.sysSampler = sampler + s.solver = solver.NewSolver(solver.SolverOpt{ ResolveOpFunc: s.resolver(), DefaultCache: opt.CacheManager, @@ -141,7 +149,7 @@ func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { return s.bridge(b) } -func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, j *solver.Job) (func(*Result, exporter.DescriptorReference, error) error, error) { +func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, j *solver.Job, usage *resources.SysSampler) (func(*Result, exporter.DescriptorReference, error) error, error) { var stopTrace func() []tracetest.SpanStub if s := trace.SpanFromContext(ctx); s.SpanContext().IsValid() { 
@@ -196,11 +204,12 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend var releasers []func() attrs := map[string]string{ - "mode": "max", + "mode": "max", + "capture-usage": "true", } makeProvenance := func(res solver.ResultProxy, cap *provenance.Capture) (*controlapi.Descriptor, func(), error) { - prc, err := NewProvenanceCreator(ctx2, cap, res, attrs, j) + prc, err := NewProvenanceCreator(ctx2, cap, res, attrs, j, usage) if err != nil { return nil, nil, err } @@ -212,7 +221,7 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend if err != nil { return nil, nil, err } - w, err := s.history.OpenBlobWriter(ctx, attestation.MediaTypeDockerSchema2AttestationType) + w, err := s.history.OpenBlobWriter(ctx, intoto.PayloadType) if err != nil { return nil, nil, err } @@ -333,7 +342,7 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend }() if err != nil { - st, ok := grpcerrors.AsGRPCStatus(grpcerrors.ToGRPC(err)) + st, ok := grpcerrors.AsGRPCStatus(grpcerrors.ToGRPC(ctx, err)) if !ok { st = status.New(codes.Unknown, err.Error()) } @@ -349,7 +358,7 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend } if stopTrace == nil { - logrus.Warn("no trace recorder found, skipping") + bklog.G(ctx).Warn("no trace recorder found, skipping") return err } go func() { @@ -391,7 +400,7 @@ func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend } return nil }(); err != nil { - logrus.Errorf("failed to save trace for %s: %+v", id, err) + bklog.G(ctx).Errorf("failed to save trace for %s: %+v", id, err) } }() @@ -407,6 +416,12 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro defer j.Discard() + var usage *resources.Sub[*resourcetypes.SysSample] + if s.sysSampler != nil { + usage = s.sysSampler.Record() + defer usage.Close(false) + } + var res *frontend.Result var resProv *Result var descref 
exporter.DescriptorReference @@ -453,7 +468,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro } if !internal { - rec, err1 := s.recordBuildHistory(ctx, id, req, exp, j) + rec, err1 := s.recordBuildHistory(ctx, id, req, exp, j, usage) if err1 != nil { defer j.CloseProgress() return nil, err1 @@ -510,7 +525,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro } for _, post := range post { - res2, err := post(ctx, resProv, s, j) + res2, err := post(ctx, resProv, s, j, usage) if err != nil { return nil, err } @@ -568,9 +583,6 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro if strings.HasPrefix(k, "frontend.") { exporterResponse[k] = string(v) } - if strings.HasPrefix(k, exptypes.ExporterBuildInfo) { - exporterResponse[k] = base64.StdEncoding.EncodeToString(v) - } } for k, v := range cacheExporterResponse { if strings.HasPrefix(k, "cache.") { @@ -700,9 +712,6 @@ func addProvenanceToResult(res *frontend.Result, br *provenanceBridge) (*Result, if res.Metadata == nil { res.Metadata = map[string][]byte{} } - if err := buildinfo.AddMetadata(res.Metadata, exptypes.ExporterBuildInfo, cp); err != nil { - return nil, err - } } if len(res.Refs) != 0 { @@ -717,9 +726,6 @@ func addProvenanceToResult(res *frontend.Result, br *provenanceBridge) (*Result, if res.Metadata == nil { res.Metadata = map[string][]byte{} } - if err := buildinfo.AddMetadata(res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), cp); err != nil { - return nil, err - } } if len(res.Attestations) != 0 { @@ -746,8 +752,9 @@ func getRefProvenance(ref solver.ResultProxy, br *provenanceBridge) (*provenance } p := ref.Provenance() if p == nil { - return nil, errors.Errorf("missing provenance for %s", ref.ID()) + return nil, nil } + pr, ok := p.(*provenance.Capture) if !ok { return nil, errors.Errorf("invalid provenance type %T", p) @@ -892,6 +899,7 @@ func defaultResolver(wc *worker.Controller) 
ResolveWorkerFunc { return wc.GetDefault() } } + func allWorkers(wc *worker.Controller) func(func(w worker.Worker) error) error { return func(f func(worker.Worker) error) error { all, err := wc.List() @@ -917,27 +925,26 @@ func inBuilderContext(ctx context.Context, b solver.Builder, name, id string, f } return b.InContext(ctx, func(ctx context.Context, g session.Group) error { pw, _, ctx := progress.NewFromContext(ctx, progress.WithMetadata("vertex", v.Digest)) - notifyCompleted := notifyStarted(ctx, &v, false) + notifyCompleted := notifyStarted(ctx, &v) defer pw.Close() err := f(ctx, g) - notifyCompleted(err, false) + notifyCompleted(err) return err }) } -func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) func(err error, cached bool) { +func notifyStarted(ctx context.Context, v *client.Vertex) func(err error) { pw, _, _ := progress.NewFromContext(ctx) start := time.Now() v.Started = &start v.Completed = nil - v.Cached = cached id := identity.NewID() pw.Write(id, *v) - return func(err error, cached bool) { + return func(err error) { defer pw.Close() stop := time.Now() v.Completed = &stop - v.Cached = cached + v.Cached = false if err != nil { v.Error = err.Error() } @@ -977,27 +984,21 @@ func loadEntitlements(b solver.Builder) (entitlements.Set, error) { } func loadSourcePolicy(b solver.Builder) (*spb.Policy, error) { - set := make(map[spb.Rule]struct{}, 0) + var srcPol spb.Policy err := b.EachValue(context.TODO(), keySourcePolicy, func(v interface{}) error { x, ok := v.(spb.Policy) if !ok { return errors.Errorf("invalid source policy %T", v) } for _, f := range x.Rules { - set[*f] = struct{}{} + r := *f + srcPol.Rules = append(srcPol.Rules, &r) } + srcPol.Version = x.Version return nil }) if err != nil { return nil, err } - var srcPol *spb.Policy - if len(set) > 0 { - srcPol = &spb.Policy{} - for k := range set { - k := k - srcPol.Rules = append(srcPol.Rules, &k) - } - } - return srcPol, nil + return &srcPol, nil } diff --git 
a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go index fc50d82ad4..7fd1fa6268 100644 --- a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go +++ b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go @@ -303,7 +303,7 @@ func (s *inMemoryResultStore) LoadRemotes(_ context.Context, _ CacheResult, _ *c return nil, nil } -func (s *inMemoryResultStore) Exists(id string) bool { +func (s *inMemoryResultStore) Exists(ctx context.Context, id string) bool { _, ok := s.m.Load(id) return ok } diff --git a/vendor/github.com/moby/buildkit/solver/pb/caps.go b/vendor/github.com/moby/buildkit/solver/pb/caps.go index 02380a4bab..5e1963ff8f 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/caps.go +++ b/vendor/github.com/moby/buildkit/solver/pb/caps.go @@ -49,7 +49,7 @@ const ( CapExecMetaUlimit apicaps.CapID = "exec.meta.ulimit" CapExecMetaRemoveMountStubsRecursive apicaps.CapID = "exec.meta.removemountstubs.recursive" CapExecMountBind apicaps.CapID = "exec.mount.bind" - CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" + CapExecMountBindReadWriteNoOutput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" CapExecMountCache apicaps.CapID = "exec.mount.cache" CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing" CapExecMountSelector apicaps.CapID = "exec.mount.selector" @@ -288,7 +288,7 @@ func init() { }) Caps.Init(apicaps.Cap{ - ID: CapExecMountBindReadWriteNoOuput, + ID: CapExecMountBindReadWriteNoOutput, Enabled: true, Status: apicaps.CapStatusExperimental, }) diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go index e8afea0233..aadff21b64 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go @@ -1443,6 +1443,7 @@ type SourceInfo struct { Filename string 
`protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` Definition *Definition `protobuf:"bytes,3,opt,name=definition,proto3" json:"definition,omitempty"` + Language string `protobuf:"bytes,4,opt,name=language,proto3" json:"language,omitempty"` } func (m *SourceInfo) Reset() { *m = SourceInfo{} } @@ -1495,6 +1496,13 @@ func (m *SourceInfo) GetDefinition() *Definition { return nil } +func (m *SourceInfo) GetLanguage() string { + if m != nil { + return m.Language + } + return "" +} + // Location defines list of areas in to source file type Location struct { SourceIndex int32 `protobuf:"varint,1,opt,name=sourceIndex,proto3" json:"sourceIndex,omitempty"` @@ -2842,168 +2850,169 @@ func init() { func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) } var fileDescriptor_8de16154b2733812 = []byte{ - // 2564 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcf, 0x6f, 0x5b, 0xc7, - 0xf1, 0x17, 0x7f, 0x93, 0x43, 0x89, 0x66, 0xd6, 0x4e, 0xc2, 0xe8, 0xeb, 0xaf, 0xac, 0xbc, 0xa4, - 0x81, 0x2c, 0xdb, 0x32, 0xaa, 0x00, 0x71, 0x60, 0x04, 0x45, 0x25, 0x91, 0x8e, 0x18, 0xc7, 0xa2, - 0xb0, 0xb4, 0x9d, 0x1e, 0x0a, 0x18, 0x4f, 0x8f, 0x4b, 0xe9, 0x41, 0x8f, 0x6f, 0x1f, 0xf6, 0x2d, - 0x2d, 0xb1, 0x87, 0x1e, 0xfa, 0x17, 0x04, 0x28, 0x50, 0xf4, 0x52, 0xf4, 0x9f, 0xe8, 0xb1, 0xbd, - 0x07, 0xc8, 0x25, 0x87, 0x1e, 0x82, 0x1e, 0xd2, 0xc2, 0xb9, 0xf4, 0x8f, 0x68, 0x81, 0x62, 0x66, - 0xf7, 0xfd, 0x20, 0x25, 0xc3, 0x71, 0x5b, 0xf4, 0xc4, 0x79, 0x33, 0x9f, 0x9d, 0x9d, 0x9d, 0x9d, - 0xd9, 0x99, 0x5d, 0x42, 0x43, 0x46, 0xf1, 0x56, 0xa4, 0xa4, 0x96, 0xac, 0x18, 0x1d, 0xad, 0xde, - 0x39, 0xf6, 0xf5, 0xc9, 0xf4, 0x68, 0xcb, 0x93, 0x93, 0xbb, 0xc7, 0xf2, 0x58, 0xde, 0x25, 0xd1, - 0xd1, 0x74, 0x4c, 0x5f, 0xf4, 0x41, 0x94, 0x19, 0xe2, 0xfc, 0xbd, 0x08, 0xc5, 0x41, 0xc4, 0xde, - 0x85, 0xaa, 0x1f, 0x46, 0x53, 0x1d, 0x77, 0x0a, 
0xeb, 0xa5, 0x8d, 0xe6, 0x76, 0x63, 0x2b, 0x3a, - 0xda, 0xea, 0x23, 0x87, 0x5b, 0x01, 0x5b, 0x87, 0xb2, 0x38, 0x17, 0x5e, 0xa7, 0xb8, 0x5e, 0xd8, - 0x68, 0x6e, 0x03, 0x02, 0x7a, 0xe7, 0xc2, 0x1b, 0x44, 0xfb, 0x4b, 0x9c, 0x24, 0xec, 0x03, 0xa8, - 0xc6, 0x72, 0xaa, 0x3c, 0xd1, 0x29, 0x11, 0x66, 0x19, 0x31, 0x43, 0xe2, 0x10, 0xca, 0x4a, 0x51, - 0xd3, 0xd8, 0x0f, 0x44, 0xa7, 0x9c, 0x69, 0x7a, 0xe0, 0x07, 0x06, 0x43, 0x12, 0xf6, 0x1e, 0x54, - 0x8e, 0xa6, 0x7e, 0x30, 0xea, 0x54, 0x08, 0xd2, 0x44, 0xc8, 0x2e, 0x32, 0x08, 0x63, 0x64, 0x08, - 0x9a, 0x08, 0x75, 0x2c, 0x3a, 0xd5, 0x0c, 0xf4, 0x08, 0x19, 0x06, 0x44, 0x32, 0x9c, 0x6b, 0xe4, - 0x8f, 0xc7, 0x9d, 0x5a, 0x36, 0x57, 0xd7, 0x1f, 0x8f, 0xcd, 0x5c, 0x28, 0x61, 0x1b, 0x50, 0x8f, - 0x02, 0x57, 0x8f, 0xa5, 0x9a, 0x74, 0x20, 0xb3, 0xfb, 0xd0, 0xf2, 0x78, 0x2a, 0x65, 0xf7, 0xa0, - 0xe9, 0xc9, 0x30, 0xd6, 0xca, 0xf5, 0x43, 0x1d, 0x77, 0x9a, 0x04, 0x7e, 0x13, 0xc1, 0x5f, 0x48, - 0x75, 0x2a, 0xd4, 0x5e, 0x26, 0xe4, 0x79, 0xe4, 0x6e, 0x19, 0x8a, 0x32, 0x72, 0x7e, 0x53, 0x80, - 0x7a, 0xa2, 0x95, 0x39, 0xb0, 0xbc, 0xa3, 0xbc, 0x13, 0x5f, 0x0b, 0x4f, 0x4f, 0x95, 0xe8, 0x14, - 0xd6, 0x0b, 0x1b, 0x0d, 0x3e, 0xc7, 0x63, 0x2d, 0x28, 0x0e, 0x86, 0xe4, 0xef, 0x06, 0x2f, 0x0e, - 0x86, 0xac, 0x03, 0xb5, 0xa7, 0xae, 0xf2, 0xdd, 0x50, 0x93, 0x83, 0x1b, 0x3c, 0xf9, 0x64, 0xd7, - 0xa1, 0x31, 0x18, 0x3e, 0x15, 0x2a, 0xf6, 0x65, 0x48, 0x6e, 0x6d, 0xf0, 0x8c, 0xc1, 0xd6, 0x00, - 0x06, 0xc3, 0x07, 0xc2, 0x45, 0xa5, 0x71, 0xa7, 0xb2, 0x5e, 0xda, 0x68, 0xf0, 0x1c, 0xc7, 0xf9, - 0x25, 0x54, 0x68, 0xab, 0xd9, 0x67, 0x50, 0x1d, 0xf9, 0xc7, 0x22, 0xd6, 0xc6, 0x9c, 0xdd, 0xed, - 0xaf, 0xbe, 0xbb, 0xb1, 0xf4, 0x97, 0xef, 0x6e, 0x6c, 0xe6, 0x62, 0x4a, 0x46, 0x22, 0xf4, 0x64, - 0xa8, 0x5d, 0x3f, 0x14, 0x2a, 0xbe, 0x7b, 0x2c, 0xef, 0x98, 0x21, 0x5b, 0x5d, 0xfa, 0xe1, 0x56, - 0x03, 0xbb, 0x09, 0x15, 0x3f, 0x1c, 0x89, 0x73, 0xb2, 0xbf, 0xb4, 0x7b, 0xd5, 0xaa, 0x6a, 0x0e, - 0xa6, 0x3a, 0x9a, 0xea, 0x3e, 0x8a, 0xb8, 0x41, 0x38, 0x5f, 0x17, 0xa0, 0x6a, 0x42, 
0x89, 0x5d, - 0x87, 0xf2, 0x44, 0x68, 0x97, 0xe6, 0x6f, 0x6e, 0xd7, 0xcd, 0x96, 0x6a, 0x97, 0x13, 0x17, 0xa3, - 0x74, 0x22, 0xa7, 0xe8, 0xfb, 0x62, 0x16, 0xa5, 0x8f, 0x90, 0xc3, 0xad, 0x80, 0xfd, 0x08, 0x6a, - 0xa1, 0xd0, 0x67, 0x52, 0x9d, 0x92, 0x8f, 0x5a, 0x26, 0x2c, 0x0e, 0x84, 0x7e, 0x24, 0x47, 0x82, - 0x27, 0x32, 0x76, 0x1b, 0xea, 0xb1, 0xf0, 0xa6, 0xca, 0xd7, 0x33, 0xf2, 0x57, 0x6b, 0xbb, 0x4d, - 0xc1, 0x6a, 0x79, 0x04, 0x4e, 0x11, 0xec, 0x16, 0x34, 0x62, 0xe1, 0x29, 0xa1, 0x45, 0xf8, 0x9c, - 0xfc, 0xd7, 0xdc, 0x5e, 0xb1, 0x70, 0x25, 0x74, 0x2f, 0x7c, 0xce, 0x33, 0xb9, 0xf3, 0x75, 0x11, - 0xca, 0x68, 0x33, 0x63, 0x50, 0x76, 0xd5, 0xb1, 0xc9, 0xa8, 0x06, 0x27, 0x9a, 0xb5, 0xa1, 0x84, - 0x3a, 0x8a, 0xc4, 0x42, 0x12, 0x39, 0xde, 0xd9, 0xc8, 0x6e, 0x28, 0x92, 0x38, 0x6e, 0x1a, 0x0b, - 0x65, 0xf7, 0x91, 0x68, 0x76, 0x13, 0x1a, 0x91, 0x92, 0xe7, 0xb3, 0x67, 0xc6, 0x82, 0x2c, 0x4a, - 0x91, 0x89, 0x06, 0xd4, 0x23, 0x4b, 0xb1, 0x4d, 0x00, 0x71, 0xae, 0x95, 0xbb, 0x2f, 0x63, 0x1d, - 0x77, 0xaa, 0x64, 0x2d, 0xc5, 0x3d, 0x32, 0xfa, 0x87, 0x3c, 0x27, 0x65, 0xab, 0x50, 0x3f, 0x91, - 0xb1, 0x0e, 0xdd, 0x89, 0xa0, 0x0c, 0x69, 0xf0, 0xf4, 0x9b, 0x39, 0x50, 0x9d, 0x06, 0xfe, 0xc4, - 0xd7, 0x9d, 0x46, 0xa6, 0xe3, 0x09, 0x71, 0xb8, 0x95, 0x60, 0x14, 0x7b, 0xc7, 0x4a, 0x4e, 0xa3, - 0x43, 0x57, 0x89, 0x50, 0x53, 0xfe, 0x34, 0xf8, 0x1c, 0x8f, 0x7d, 0x02, 0xef, 0x28, 0x31, 0x91, - 0xcf, 0x05, 0x6d, 0xd4, 0x50, 0x4f, 0x8f, 0x62, 0x8e, 0x8e, 0x8d, 0xfd, 0xe7, 0x82, 0x72, 0xa8, - 0xce, 0x5f, 0x0e, 0x70, 0x6e, 0x43, 0xd5, 0xd8, 0x8d, 0x6e, 0x41, 0xca, 0x66, 0x0a, 0xd1, 0x98, - 0x21, 0xfd, 0xc3, 0x24, 0x43, 0xfa, 0x87, 0x4e, 0x17, 0xaa, 0xc6, 0x42, 0x44, 0x1f, 0xe0, 0xaa, - 0x2c, 0x1a, 0x69, 0xe4, 0x0d, 0xe5, 0x58, 0x9b, 0x88, 0xe4, 0x44, 0x93, 0x56, 0x57, 0x19, 0xff, - 0x97, 0x38, 0xd1, 0xce, 0x43, 0x68, 0xa4, 0x3b, 0x4b, 0x53, 0x74, 0xad, 0x9a, 0x62, 0xbf, 0x8b, - 0x03, 0xc8, 0x5d, 0x66, 0x52, 0xa2, 0xd1, 0x8d, 0x32, 0xd2, 0xbe, 0x0c, 0xdd, 0x80, 0x14, 0xd5, - 0x79, 0xfa, 0xed, 0xfc, 
0xb6, 0x04, 0x15, 0x5a, 0x18, 0xdb, 0xc0, 0x8c, 0x88, 0xa6, 0x66, 0x05, - 0xa5, 0x5d, 0x66, 0x33, 0x02, 0x28, 0xf7, 0xd2, 0x84, 0xc0, 0x3c, 0x5c, 0xc5, 0xe8, 0x0c, 0x84, - 0xa7, 0xa5, 0xb2, 0xf3, 0xa4, 0xdf, 0x38, 0xff, 0x08, 0x33, 0xd4, 0x04, 0x0c, 0xd1, 0xec, 0x16, - 0x54, 0x25, 0xa5, 0x15, 0xc5, 0xcc, 0x4b, 0x92, 0xcd, 0x42, 0x50, 0xb9, 0x12, 0xee, 0x48, 0x86, - 0xc1, 0x8c, 0x22, 0xa9, 0xce, 0xd3, 0x6f, 0x0c, 0x74, 0xca, 0xa3, 0xc7, 0xb3, 0xc8, 0x1c, 0xab, - 0x2d, 0x13, 0xe8, 0x8f, 0x12, 0x26, 0xcf, 0xe4, 0x78, 0x70, 0x3e, 0x9e, 0x44, 0xe3, 0x78, 0x10, - 0xe9, 0xce, 0xd5, 0x2c, 0x24, 0x13, 0x1e, 0x4f, 0xa5, 0x88, 0xf4, 0x5c, 0xef, 0x44, 0x20, 0xf2, - 0x5a, 0x86, 0xdc, 0xb3, 0x3c, 0x9e, 0x4a, 0xb3, 0x4c, 0x43, 0xe8, 0x9b, 0x04, 0xcd, 0x65, 0x1a, - 0x62, 0x33, 0x39, 0x46, 0xe8, 0x70, 0xb8, 0x8f, 0xc8, 0xb7, 0xb2, 0xd3, 0xdd, 0x70, 0xb8, 0x95, - 0x98, 0xd5, 0xc6, 0xd3, 0x40, 0xf7, 0xbb, 0x9d, 0xb7, 0x8d, 0x2b, 0x93, 0x6f, 0x67, 0x2d, 0x5b, - 0x00, 0xba, 0x35, 0xf6, 0x7f, 0x61, 0xe2, 0xa5, 0xc4, 0x89, 0x76, 0xfa, 0x50, 0x4f, 0x4c, 0xbc, - 0x10, 0x06, 0x77, 0xa0, 0x16, 0x9f, 0xb8, 0xca, 0x0f, 0x8f, 0x69, 0x87, 0x5a, 0xdb, 0x57, 0xd3, - 0x15, 0x0d, 0x0d, 0x1f, 0xad, 0x48, 0x30, 0x8e, 0x4c, 0x42, 0xea, 0x32, 0x5d, 0x6d, 0x28, 0x4d, - 0xfd, 0x11, 0xe9, 0x59, 0xe1, 0x48, 0x22, 0xe7, 0xd8, 0x37, 0x41, 0xb9, 0xc2, 0x91, 0x44, 0xfb, - 0x26, 0x72, 0x64, 0x6a, 0xe6, 0x0a, 0x27, 0x7a, 0x2e, 0xec, 0x2a, 0x0b, 0x61, 0x17, 0x24, 0xbe, - 0xf9, 0x9f, 0xcc, 0xf6, 0xeb, 0x02, 0xd4, 0x93, 0x42, 0x8f, 0xe5, 0xc6, 0x1f, 0x89, 0x50, 0xfb, - 0x63, 0x5f, 0x28, 0x3b, 0x71, 0x8e, 0xc3, 0xee, 0x40, 0xc5, 0xd5, 0x5a, 0x25, 0x87, 0xf8, 0xdb, - 0xf9, 0x2e, 0x61, 0x6b, 0x07, 0x25, 0xbd, 0x50, 0xab, 0x19, 0x37, 0xa8, 0xd5, 0x8f, 0x01, 0x32, - 0x26, 0xda, 0x7a, 0x2a, 0x66, 0x56, 0x2b, 0x92, 0xec, 0x1a, 0x54, 0x9e, 0xbb, 0xc1, 0x34, 0xc9, - 0x48, 0xf3, 0x71, 0xbf, 0xf8, 0x71, 0xc1, 0xf9, 0x53, 0x11, 0x6a, 0xb6, 0x6b, 0x60, 0xb7, 0xa1, - 0x46, 0x5d, 0x83, 0xb5, 0xe8, 0xf2, 0xf4, 0x4b, 0x20, 0xec, 
0x6e, 0xda, 0x0e, 0xe5, 0x6c, 0xb4, - 0xaa, 0x4c, 0x5b, 0x64, 0x6d, 0xcc, 0x9a, 0xa3, 0xd2, 0x48, 0x8c, 0x6d, 0xdf, 0xd3, 0xa2, 0x2e, - 0x43, 0x8c, 0xfd, 0xd0, 0x47, 0xff, 0x70, 0x14, 0xb1, 0xdb, 0xc9, 0xaa, 0xcb, 0xa4, 0xf1, 0xad, - 0xbc, 0xc6, 0x8b, 0x8b, 0xee, 0x43, 0x33, 0x37, 0xcd, 0x25, 0xab, 0x7e, 0x3f, 0xbf, 0x6a, 0x3b, - 0x25, 0xa9, 0x33, 0x4d, 0x5b, 0xe6, 0x85, 0xff, 0xc0, 0x7f, 0x1f, 0x01, 0x64, 0x2a, 0x7f, 0xf8, - 0xf1, 0xe5, 0xfc, 0xb1, 0x04, 0x30, 0x88, 0xb0, 0x06, 0x8e, 0x5c, 0xaa, 0xda, 0xcb, 0xfe, 0x71, - 0x28, 0x95, 0x78, 0x46, 0x69, 0x4e, 0xe3, 0xeb, 0xbc, 0x69, 0x78, 0x94, 0x31, 0x6c, 0x07, 0x9a, - 0x23, 0x11, 0x7b, 0xca, 0xa7, 0x80, 0xb2, 0x4e, 0xbf, 0x81, 0x6b, 0xca, 0xf4, 0x6c, 0x75, 0x33, - 0x84, 0xf1, 0x55, 0x7e, 0x0c, 0xdb, 0x86, 0x65, 0x71, 0x1e, 0x49, 0xa5, 0xed, 0x2c, 0xa6, 0xb9, - 0xbc, 0x62, 0xda, 0x54, 0xe4, 0xd3, 0x4c, 0xbc, 0x29, 0xb2, 0x0f, 0xe6, 0x42, 0xd9, 0x73, 0xa3, - 0xd8, 0x96, 0xf4, 0xce, 0xc2, 0x7c, 0x7b, 0x6e, 0x64, 0x9c, 0xb6, 0xfb, 0x21, 0xae, 0xf5, 0x57, - 0x7f, 0xbd, 0x71, 0x2b, 0xd7, 0x07, 0x4d, 0xe4, 0xd1, 0xec, 0x2e, 0xc5, 0xcb, 0xa9, 0xaf, 0xef, - 0x4e, 0xb5, 0x1f, 0xdc, 0x75, 0x23, 0x1f, 0xd5, 0xe1, 0xc0, 0x7e, 0x97, 0x93, 0x6a, 0xf6, 0x31, - 0xb4, 0x22, 0x25, 0x8f, 0x95, 0x88, 0xe3, 0x67, 0x54, 0x15, 0x6d, 0xb7, 0xfa, 0x86, 0xad, 0xde, - 0x24, 0xf9, 0x14, 0x05, 0x7c, 0x25, 0xca, 0x7f, 0xae, 0xfe, 0x04, 0xda, 0x8b, 0x2b, 0x7e, 0x9d, - 0xdd, 0x5b, 0xbd, 0x07, 0x8d, 0x74, 0x05, 0xaf, 0x1a, 0x58, 0xcf, 0x6f, 0xfb, 0x1f, 0x0a, 0x50, - 0x35, 0xf9, 0xc8, 0xee, 0x41, 0x23, 0x90, 0x9e, 0x8b, 0x06, 0x24, 0x37, 0x83, 0x77, 0xb2, 0x74, - 0xdd, 0xfa, 0x3c, 0x91, 0x99, 0xfd, 0xc8, 0xb0, 0x18, 0x9e, 0x7e, 0x38, 0x96, 0x49, 0xfe, 0xb4, - 0xb2, 0x41, 0xfd, 0x70, 0x2c, 0xb9, 0x11, 0xae, 0x3e, 0x84, 0xd6, 0xbc, 0x8a, 0x4b, 0xec, 0x7c, - 0x6f, 0x3e, 0xd0, 0xa9, 0x1a, 0xa4, 0x83, 0xf2, 0x66, 0xdf, 0x83, 0x46, 0xca, 0x67, 0x9b, 0x17, - 0x0d, 0x5f, 0xce, 0x8f, 0xcc, 0xd9, 0xea, 0x04, 0x00, 0x99, 0x69, 0x78, 0xcc, 0xe1, 0x15, 0x24, - 
0xcc, 0x9a, 0x87, 0xf4, 0x9b, 0x6a, 0xaf, 0xab, 0x5d, 0x32, 0x65, 0x99, 0x13, 0xcd, 0xb6, 0x00, - 0x46, 0x69, 0xaa, 0xbf, 0xe4, 0x00, 0xc8, 0x21, 0x9c, 0x01, 0xd4, 0x13, 0x23, 0xd8, 0x3a, 0x34, - 0x63, 0x3b, 0x33, 0x76, 0xca, 0x38, 0x5d, 0x85, 0xe7, 0x59, 0xd8, 0xf1, 0x2a, 0x37, 0x3c, 0x16, - 0x73, 0x1d, 0x2f, 0x47, 0x0e, 0xb7, 0x02, 0xe7, 0x0b, 0xa8, 0x10, 0x03, 0x13, 0x34, 0xd6, 0xae, - 0xd2, 0xb6, 0x79, 0x36, 0xfd, 0xa1, 0x8c, 0x69, 0xda, 0xdd, 0x32, 0x86, 0x30, 0x37, 0x00, 0xf6, - 0x3e, 0x76, 0xa1, 0x23, 0xeb, 0xd1, 0xcb, 0x70, 0x28, 0x76, 0x3e, 0x81, 0x7a, 0xc2, 0xc6, 0x95, - 0x07, 0x7e, 0x28, 0xac, 0x89, 0x44, 0xe3, 0xa5, 0xc3, 0x3b, 0x71, 0x95, 0xeb, 0x69, 0x61, 0xda, - 0x94, 0x0a, 0xcf, 0x18, 0xce, 0x7b, 0xd0, 0xcc, 0xe5, 0x1d, 0x86, 0xdb, 0x53, 0xda, 0x46, 0x93, - 0xfd, 0xe6, 0xc3, 0xf9, 0x14, 0x56, 0xe6, 0x72, 0x00, 0x8b, 0x95, 0x3f, 0x4a, 0x8a, 0x95, 0x29, - 0x44, 0x17, 0xba, 0x2d, 0x06, 0xe5, 0x33, 0xe1, 0x9e, 0xda, 0x4e, 0x8b, 0x68, 0xe7, 0xf7, 0x78, - 0xb7, 0x4a, 0x3a, 0xe0, 0xff, 0x07, 0x38, 0xd1, 0x3a, 0x7a, 0x46, 0x2d, 0xb1, 0x55, 0xd6, 0x40, - 0x0e, 0x21, 0xd8, 0x0d, 0x68, 0xe2, 0x47, 0x6c, 0xe5, 0x46, 0x35, 0x8d, 0x88, 0x0d, 0xe0, 0xff, - 0xa0, 0x31, 0x4e, 0x87, 0x97, 0x6c, 0x0c, 0x24, 0xa3, 0xdf, 0x81, 0x7a, 0x28, 0xad, 0xcc, 0x74, - 0xe8, 0xb5, 0x50, 0xa6, 0xe3, 0xdc, 0x20, 0xb0, 0xb2, 0x8a, 0x19, 0xe7, 0x06, 0x01, 0x09, 0x9d, - 0x5b, 0xf0, 0xc6, 0x85, 0x5b, 0x22, 0x7b, 0x0b, 0xaa, 0x63, 0x3f, 0xd0, 0x54, 0x94, 0xf0, 0x46, - 0x60, 0xbf, 0x9c, 0x7f, 0x16, 0x00, 0xb2, 0xf8, 0xc1, 0xac, 0xc0, 0xea, 0x82, 0x98, 0x65, 0x53, - 0x4d, 0x02, 0xa8, 0x4f, 0xec, 0x39, 0x65, 0x23, 0xe3, 0xfa, 0x7c, 0xcc, 0x6d, 0x25, 0xc7, 0x98, - 0x39, 0xc1, 0xb6, 0xed, 0x09, 0xf6, 0x3a, 0x37, 0xb9, 0x74, 0x06, 0x6a, 0xb4, 0xf2, 0x17, 0x7b, - 0xc8, 0xd2, 0x99, 0x5b, 0xc9, 0xea, 0x43, 0x58, 0x99, 0x9b, 0xf2, 0x07, 0xd6, 0xac, 0xec, 0xbc, - 0xcd, 0xe7, 0xf2, 0x36, 0x54, 0xcd, 0x8b, 0x00, 0xdb, 0x80, 0x9a, 0xeb, 0x99, 0x34, 0xce, 0x1d, - 0x25, 0x28, 0xdc, 0x21, 0x36, 0x4f, 
0xc4, 0xce, 0x9f, 0x8b, 0x00, 0x19, 0xff, 0x35, 0xba, 0xed, - 0xfb, 0xd0, 0x8a, 0x85, 0x27, 0xc3, 0x91, 0xab, 0x66, 0x24, 0xb5, 0x57, 0xd6, 0xcb, 0x86, 0x2c, - 0x20, 0x73, 0x9d, 0x77, 0xe9, 0xd5, 0x9d, 0xf7, 0x06, 0x94, 0x3d, 0x19, 0xcd, 0x6c, 0x69, 0x62, - 0xf3, 0x0b, 0xd9, 0x93, 0xd1, 0x6c, 0x7f, 0x89, 0x13, 0x82, 0x6d, 0x41, 0x75, 0x72, 0x4a, 0x6f, - 0x24, 0xe6, 0xae, 0x77, 0x6d, 0x1e, 0xfb, 0xe8, 0x14, 0xe9, 0xfd, 0x25, 0x6e, 0x51, 0xec, 0x16, - 0x54, 0x26, 0xa7, 0x23, 0x5f, 0xd9, 0xe2, 0x72, 0x75, 0x11, 0xde, 0xf5, 0x15, 0x3d, 0x89, 0x20, - 0x86, 0x39, 0x50, 0x54, 0x13, 0xfb, 0x20, 0xd2, 0x5e, 0xf0, 0xe6, 0x64, 0x7f, 0x89, 0x17, 0xd5, - 0x64, 0xb7, 0x0e, 0x55, 0xe3, 0x57, 0xe7, 0x1f, 0x25, 0x68, 0xcd, 0x5b, 0x89, 0x3b, 0x1b, 0x2b, - 0x2f, 0xd9, 0xd9, 0x58, 0x79, 0xe9, 0xa5, 0xa4, 0x98, 0xbb, 0x94, 0x38, 0x50, 0x91, 0x67, 0xa1, - 0x50, 0xf9, 0xc7, 0xa0, 0xbd, 0x13, 0x79, 0x16, 0x62, 0x63, 0x6c, 0x44, 0x73, 0x7d, 0x66, 0xc5, - 0xf6, 0x99, 0xef, 0xc3, 0xca, 0x58, 0x06, 0x81, 0x3c, 0x1b, 0xce, 0x26, 0x81, 0x1f, 0x9e, 0xda, - 0x66, 0x73, 0x9e, 0xc9, 0x36, 0xe0, 0xca, 0xc8, 0x57, 0x68, 0xce, 0x9e, 0x0c, 0xb5, 0x08, 0xe9, - 0xaa, 0x8b, 0xb8, 0x45, 0x36, 0xfb, 0x0c, 0xd6, 0x5d, 0xad, 0xc5, 0x24, 0xd2, 0x4f, 0xc2, 0xc8, - 0xf5, 0x4e, 0xbb, 0xd2, 0xa3, 0x2c, 0x9c, 0x44, 0xae, 0xf6, 0x8f, 0xfc, 0xc0, 0xd7, 0x33, 0x72, - 0x46, 0x9d, 0xbf, 0x12, 0xc7, 0x3e, 0x80, 0x96, 0xa7, 0x84, 0xab, 0x45, 0x57, 0xc4, 0xfa, 0xd0, - 0xd5, 0x27, 0x9d, 0x3a, 0x8d, 0x5c, 0xe0, 0xe2, 0x1a, 0x5c, 0xb4, 0xf6, 0x0b, 0x3f, 0x18, 0x79, - 0x78, 0xbd, 0x6c, 0x98, 0x35, 0xcc, 0x31, 0xd9, 0x16, 0x30, 0x62, 0xf4, 0x26, 0x91, 0x9e, 0xa5, - 0x50, 0x20, 0xe8, 0x25, 0x12, 0x3c, 0x70, 0xb5, 0x3f, 0x11, 0xb1, 0x76, 0x27, 0x11, 0xdd, 0x9c, - 0x4b, 0x3c, 0x63, 0xb0, 0x9b, 0xd0, 0xf6, 0x43, 0x2f, 0x98, 0x8e, 0xc4, 0xb3, 0x08, 0x17, 0xa2, - 0xc2, 0xb8, 0xb3, 0x4c, 0xa7, 0xca, 0x15, 0xcb, 0x3f, 0xb4, 0x6c, 0x84, 0x8a, 0xf3, 0x05, 0xe8, - 0x8a, 0x81, 0x5a, 0x7e, 0x02, 0x75, 0xbe, 0x2c, 0x40, 0x7b, 0x31, 0xf0, 
0x70, 0xdb, 0x22, 0x5c, - 0xbc, 0xbd, 0x5c, 0x23, 0x9d, 0x6e, 0x65, 0x31, 0xb7, 0x95, 0x49, 0xbd, 0x2c, 0xe5, 0xea, 0x65, - 0x1a, 0x16, 0xe5, 0x97, 0x87, 0xc5, 0xdc, 0x42, 0x2b, 0x0b, 0x0b, 0x75, 0x7e, 0x57, 0x80, 0x2b, - 0x0b, 0xc1, 0xfd, 0x83, 0x2d, 0x5a, 0x87, 0xe6, 0xc4, 0x3d, 0x15, 0xe6, 0x69, 0x22, 0xb6, 0x25, - 0x24, 0xcf, 0xfa, 0x2f, 0xd8, 0x17, 0xc2, 0x72, 0x3e, 0xa3, 0x2e, 0xb5, 0x2d, 0x09, 0x90, 0x03, - 0xa9, 0x1f, 0xc8, 0xa9, 0xad, 0xc5, 0x49, 0x80, 0x24, 0xcc, 0x8b, 0x61, 0x54, 0xba, 0x24, 0x8c, - 0x9c, 0x03, 0xa8, 0x27, 0x06, 0xb2, 0x1b, 0xf6, 0xed, 0xa8, 0x90, 0x3d, 0x89, 0x3e, 0x89, 0x85, - 0x42, 0xdb, 0xcd, 0x43, 0xd2, 0xbb, 0x50, 0x31, 0x6d, 0x68, 0xf1, 0x22, 0xc2, 0x48, 0x9c, 0x21, - 0xd4, 0x2c, 0x87, 0x6d, 0x42, 0xf5, 0x68, 0x96, 0xbe, 0xa3, 0xd8, 0xe3, 0x02, 0xbf, 0x47, 0x16, - 0x81, 0x67, 0x90, 0x41, 0xb0, 0x6b, 0x50, 0x3e, 0x9a, 0xf5, 0xbb, 0xe6, 0x62, 0x89, 0x27, 0x19, - 0x7e, 0xed, 0x56, 0x8d, 0x41, 0xce, 0xe7, 0xb0, 0x9c, 0x1f, 0x97, 0x16, 0xf6, 0x42, 0xae, 0xb0, - 0xa7, 0x47, 0x76, 0xf1, 0x55, 0x37, 0x8c, 0x8f, 0x00, 0xe8, 0xa5, 0xf7, 0x75, 0x6f, 0x26, 0x3f, - 0x86, 0x9a, 0x7d, 0x21, 0x66, 0x1f, 0x2c, 0xbc, 0x78, 0xb7, 0xd2, 0xe7, 0xe3, 0xb9, 0x67, 0x6f, - 0xe7, 0x3e, 0xf6, 0xa8, 0x67, 0x42, 0x75, 0xfd, 0xf1, 0xf8, 0x75, 0xa7, 0xbb, 0x0f, 0xad, 0x27, - 0x51, 0xf4, 0xef, 0x8d, 0xfd, 0x39, 0x54, 0xcd, 0x43, 0x35, 0x8e, 0x09, 0xd0, 0x02, 0xbb, 0x07, - 0xcc, 0xf4, 0xb1, 0x79, 0x93, 0xb8, 0x01, 0x20, 0x72, 0x8a, 0xf3, 0xd9, 0xcd, 0x25, 0xe4, 0xbc, - 0x01, 0xdc, 0x00, 0x36, 0x37, 0xa0, 0x66, 0xdf, 0x44, 0x59, 0x03, 0x2a, 0x4f, 0x0e, 0x86, 0xbd, - 0xc7, 0xed, 0x25, 0x56, 0x87, 0xf2, 0xfe, 0x60, 0xf8, 0xb8, 0x5d, 0x40, 0xea, 0x60, 0x70, 0xd0, - 0x6b, 0x17, 0x37, 0x6f, 0xc2, 0x72, 0xfe, 0x55, 0x94, 0x35, 0xa1, 0x36, 0xdc, 0x39, 0xe8, 0xee, - 0x0e, 0x7e, 0xd6, 0x5e, 0x62, 0xcb, 0x50, 0xef, 0x1f, 0x0c, 0x7b, 0x7b, 0x4f, 0x78, 0xaf, 0x5d, - 0xd8, 0xfc, 0x29, 0x34, 0xd2, 0x87, 0x22, 0xd4, 0xb0, 0xdb, 0x3f, 0xe8, 0xb6, 0x97, 0x18, 0x40, - 0x75, 0xd8, 
0xdb, 0xe3, 0x3d, 0xd4, 0x5b, 0x83, 0xd2, 0x70, 0xb8, 0xdf, 0x2e, 0xe2, 0xac, 0x7b, - 0x3b, 0x7b, 0xfb, 0xbd, 0x76, 0x09, 0xc9, 0xc7, 0x8f, 0x0e, 0x1f, 0x0c, 0xdb, 0xe5, 0xcd, 0x8f, - 0xe0, 0xca, 0xc2, 0x13, 0x0a, 0x8d, 0xde, 0xdf, 0xe1, 0x3d, 0xd4, 0xd4, 0x84, 0xda, 0x21, 0xef, - 0x3f, 0xdd, 0x79, 0xdc, 0x6b, 0x17, 0x50, 0xf0, 0xf9, 0x60, 0xef, 0x61, 0xaf, 0xdb, 0x2e, 0xee, - 0x5e, 0xff, 0xea, 0xc5, 0x5a, 0xe1, 0x9b, 0x17, 0x6b, 0x85, 0x6f, 0x5f, 0xac, 0x15, 0xfe, 0xf6, - 0x62, 0xad, 0xf0, 0xe5, 0xf7, 0x6b, 0x4b, 0xdf, 0x7c, 0xbf, 0xb6, 0xf4, 0xed, 0xf7, 0x6b, 0x4b, - 0x47, 0x55, 0xfa, 0xab, 0xe3, 0xc3, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x4f, 0x06, 0xaa, - 0x2a, 0x19, 0x00, 0x00, + // 2577 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x4f, 0x6f, 0x5b, 0xc7, + 0x11, 0x17, 0xff, 0x93, 0x43, 0x89, 0x66, 0xd6, 0x4e, 0xc2, 0xa8, 0xae, 0xac, 0xbc, 0xa4, 0x81, + 0x2c, 0xdb, 0x12, 0xaa, 0x00, 0x71, 0x60, 0x04, 0x45, 0x25, 0x91, 0x8e, 0x18, 0xc7, 0xa2, 0xb0, + 0xb4, 0x9d, 0x1e, 0x0a, 0x18, 0x4f, 0x8f, 0x4b, 0xea, 0x41, 0xef, 0xbd, 0x7d, 0x78, 0x6f, 0x69, + 0x89, 0x3d, 0xf4, 0xd0, 0x53, 0x8f, 0x01, 0x0a, 0x14, 0xbd, 0x14, 0xfd, 0x12, 0x3d, 0xb6, 0xf7, + 0x00, 0xb9, 0xe4, 0xd0, 0x43, 0xd0, 0x43, 0x5a, 0x38, 0x97, 0x7e, 0x88, 0x16, 0x28, 0x66, 0x76, + 0xdf, 0x1f, 0x52, 0x32, 0x6c, 0xb7, 0x45, 0x4f, 0x9c, 0x37, 0xf3, 0xdb, 0xd9, 0xd9, 0xd9, 0x99, + 0x9d, 0xd9, 0x25, 0x34, 0x64, 0x18, 0x6f, 0x85, 0x91, 0x54, 0x92, 0x15, 0xc3, 0xe3, 0xd5, 0x3b, + 0x13, 0x57, 0x9d, 0x4c, 0x8f, 0xb7, 0x1c, 0xe9, 0x6f, 0x4f, 0xe4, 0x44, 0x6e, 0x93, 0xe8, 0x78, + 0x3a, 0xa6, 0x2f, 0xfa, 0x20, 0x4a, 0x0f, 0xb1, 0xfe, 0x51, 0x84, 0xe2, 0x20, 0x64, 0xef, 0x42, + 0xd5, 0x0d, 0xc2, 0xa9, 0x8a, 0x3b, 0x85, 0xf5, 0xd2, 0x46, 0x73, 0xa7, 0xb1, 0x15, 0x1e, 0x6f, + 0xf5, 0x91, 0xc3, 0x8d, 0x80, 0xad, 0x43, 0x59, 0x9c, 0x0b, 0xa7, 0x53, 0x5c, 0x2f, 0x6c, 0x34, + 0x77, 0x00, 0x01, 0xbd, 0x73, 0xe1, 0x0c, 0xc2, 0x83, 0x25, 0x4e, 0x12, 
0xf6, 0x01, 0x54, 0x63, + 0x39, 0x8d, 0x1c, 0xd1, 0x29, 0x11, 0x66, 0x19, 0x31, 0x43, 0xe2, 0x10, 0xca, 0x48, 0x51, 0xd3, + 0xd8, 0xf5, 0x44, 0xa7, 0x9c, 0x69, 0xba, 0xef, 0x7a, 0x1a, 0x43, 0x12, 0xf6, 0x1e, 0x54, 0x8e, + 0xa7, 0xae, 0x37, 0xea, 0x54, 0x08, 0xd2, 0x44, 0xc8, 0x1e, 0x32, 0x08, 0xa3, 0x65, 0x08, 0xf2, + 0x45, 0x34, 0x11, 0x9d, 0x6a, 0x06, 0x7a, 0x88, 0x0c, 0x0d, 0x22, 0x19, 0xce, 0x35, 0x72, 0xc7, + 0xe3, 0x4e, 0x2d, 0x9b, 0xab, 0xeb, 0x8e, 0xc7, 0x7a, 0x2e, 0x94, 0xb0, 0x0d, 0xa8, 0x87, 0x9e, + 0xad, 0xc6, 0x32, 0xf2, 0x3b, 0x90, 0xd9, 0x7d, 0x64, 0x78, 0x3c, 0x95, 0xb2, 0xbb, 0xd0, 0x74, + 0x64, 0x10, 0xab, 0xc8, 0x76, 0x03, 0x15, 0x77, 0x9a, 0x04, 0x7e, 0x13, 0xc1, 0x5f, 0xc8, 0xe8, + 0x54, 0x44, 0xfb, 0x99, 0x90, 0xe7, 0x91, 0x7b, 0x65, 0x28, 0xca, 0xd0, 0xfa, 0x6d, 0x01, 0xea, + 0x89, 0x56, 0x66, 0xc1, 0xf2, 0x6e, 0xe4, 0x9c, 0xb8, 0x4a, 0x38, 0x6a, 0x1a, 0x89, 0x4e, 0x61, + 0xbd, 0xb0, 0xd1, 0xe0, 0x73, 0x3c, 0xd6, 0x82, 0xe2, 0x60, 0x48, 0xfe, 0x6e, 0xf0, 0xe2, 0x60, + 0xc8, 0x3a, 0x50, 0x7b, 0x62, 0x47, 0xae, 0x1d, 0x28, 0x72, 0x70, 0x83, 0x27, 0x9f, 0xec, 0x3a, + 0x34, 0x06, 0xc3, 0x27, 0x22, 0x8a, 0x5d, 0x19, 0x90, 0x5b, 0x1b, 0x3c, 0x63, 0xb0, 0x35, 0x80, + 0xc1, 0xf0, 0xbe, 0xb0, 0x51, 0x69, 0xdc, 0xa9, 0xac, 0x97, 0x36, 0x1a, 0x3c, 0xc7, 0xb1, 0x7e, + 0x09, 0x15, 0xda, 0x6a, 0xf6, 0x19, 0x54, 0x47, 0xee, 0x44, 0xc4, 0x4a, 0x9b, 0xb3, 0xb7, 0xf3, + 0xd5, 0x77, 0x37, 0x96, 0xfe, 0xfa, 0xdd, 0x8d, 0xcd, 0x5c, 0x4c, 0xc9, 0x50, 0x04, 0x8e, 0x0c, + 0x94, 0xed, 0x06, 0x22, 0x8a, 0xb7, 0x27, 0xf2, 0x8e, 0x1e, 0xb2, 0xd5, 0xa5, 0x1f, 0x6e, 0x34, + 0xb0, 0x9b, 0x50, 0x71, 0x83, 0x91, 0x38, 0x27, 0xfb, 0x4b, 0x7b, 0x57, 0x8d, 0xaa, 0xe6, 0x60, + 0xaa, 0xc2, 0xa9, 0xea, 0xa3, 0x88, 0x6b, 0x84, 0xf5, 0x75, 0x01, 0xaa, 0x3a, 0x94, 0xd8, 0x75, + 0x28, 0xfb, 0x42, 0xd9, 0x34, 0x7f, 0x73, 0xa7, 0xae, 0xb7, 0x54, 0xd9, 0x9c, 0xb8, 0x18, 0xa5, + 0xbe, 0x9c, 0xa2, 0xef, 0x8b, 0x59, 0x94, 0x3e, 0x44, 0x0e, 0x37, 0x02, 0xf6, 0x23, 0xa8, 0x05, + 0x42, 0x9d, 
0xc9, 0xe8, 0x94, 0x7c, 0xd4, 0xd2, 0x61, 0x71, 0x28, 0xd4, 0x43, 0x39, 0x12, 0x3c, + 0x91, 0xb1, 0xdb, 0x50, 0x8f, 0x85, 0x33, 0x8d, 0x5c, 0x35, 0x23, 0x7f, 0xb5, 0x76, 0xda, 0x14, + 0xac, 0x86, 0x47, 0xe0, 0x14, 0xc1, 0x6e, 0x41, 0x23, 0x16, 0x4e, 0x24, 0x94, 0x08, 0x9e, 0x91, + 0xff, 0x9a, 0x3b, 0x2b, 0x06, 0x1e, 0x09, 0xd5, 0x0b, 0x9e, 0xf1, 0x4c, 0x6e, 0x7d, 0x5d, 0x84, + 0x32, 0xda, 0xcc, 0x18, 0x94, 0xed, 0x68, 0xa2, 0x33, 0xaa, 0xc1, 0x89, 0x66, 0x6d, 0x28, 0xa1, + 0x8e, 0x22, 0xb1, 0x90, 0x44, 0x8e, 0x73, 0x36, 0x32, 0x1b, 0x8a, 0x24, 0x8e, 0x9b, 0xc6, 0x22, + 0x32, 0xfb, 0x48, 0x34, 0xbb, 0x09, 0x8d, 0x30, 0x92, 0xe7, 0xb3, 0xa7, 0xda, 0x82, 0x2c, 0x4a, + 0x91, 0x89, 0x06, 0xd4, 0x43, 0x43, 0xb1, 0x4d, 0x00, 0x71, 0xae, 0x22, 0xfb, 0x40, 0xc6, 0x2a, + 0xee, 0x54, 0xc9, 0x5a, 0x8a, 0x7b, 0x64, 0xf4, 0x8f, 0x78, 0x4e, 0xca, 0x56, 0xa1, 0x7e, 0x22, + 0x63, 0x15, 0xd8, 0xbe, 0xa0, 0x0c, 0x69, 0xf0, 0xf4, 0x9b, 0x59, 0x50, 0x9d, 0x7a, 0xae, 0xef, + 0xaa, 0x4e, 0x23, 0xd3, 0xf1, 0x98, 0x38, 0xdc, 0x48, 0x30, 0x8a, 0x9d, 0x49, 0x24, 0xa7, 0xe1, + 0x91, 0x1d, 0x89, 0x40, 0x51, 0xfe, 0x34, 0xf8, 0x1c, 0x8f, 0x7d, 0x02, 0xef, 0x44, 0xc2, 0x97, + 0xcf, 0x04, 0x6d, 0xd4, 0x50, 0x4d, 0x8f, 0x63, 0x8e, 0x8e, 0x8d, 0xdd, 0x67, 0x82, 0x72, 0xa8, + 0xce, 0x5f, 0x0c, 0xb0, 0x6e, 0x43, 0x55, 0xdb, 0x8d, 0x6e, 0x41, 0xca, 0x64, 0x0a, 0xd1, 0x98, + 0x21, 0xfd, 0xa3, 0x24, 0x43, 0xfa, 0x47, 0x56, 0x17, 0xaa, 0xda, 0x42, 0x44, 0x1f, 0xe2, 0xaa, + 0x0c, 0x1a, 0x69, 0xe4, 0x0d, 0xe5, 0x58, 0xe9, 0x88, 0xe4, 0x44, 0x93, 0x56, 0x3b, 0xd2, 0xfe, + 0x2f, 0x71, 0xa2, 0xad, 0x07, 0xd0, 0x48, 0x77, 0x96, 0xa6, 0xe8, 0x1a, 0x35, 0xc5, 0x7e, 0x17, + 0x07, 0x90, 0xbb, 0xf4, 0xa4, 0x44, 0xa3, 0x1b, 0x65, 0xa8, 0x5c, 0x19, 0xd8, 0x1e, 0x29, 0xaa, + 0xf3, 0xf4, 0xdb, 0xfa, 0x5d, 0x09, 0x2a, 0xb4, 0x30, 0xb6, 0x81, 0x19, 0x11, 0x4e, 0xf5, 0x0a, + 0x4a, 0x7b, 0xcc, 0x64, 0x04, 0x50, 0xee, 0xa5, 0x09, 0x81, 0x79, 0xb8, 0x8a, 0xd1, 0xe9, 0x09, + 0x47, 0xc9, 0xc8, 0xcc, 0x93, 0x7e, 0xe3, 0xfc, 
0x23, 0xcc, 0x50, 0x1d, 0x30, 0x44, 0xb3, 0x5b, + 0x50, 0x95, 0x94, 0x56, 0x14, 0x33, 0x2f, 0x48, 0x36, 0x03, 0x41, 0xe5, 0x91, 0xb0, 0x47, 0x32, + 0xf0, 0x66, 0x14, 0x49, 0x75, 0x9e, 0x7e, 0x63, 0xa0, 0x53, 0x1e, 0x3d, 0x9a, 0x85, 0xfa, 0x58, + 0x6d, 0xe9, 0x40, 0x7f, 0x98, 0x30, 0x79, 0x26, 0xc7, 0x83, 0xf3, 0x91, 0x1f, 0x8e, 0xe3, 0x41, + 0xa8, 0x3a, 0x57, 0xb3, 0x90, 0x4c, 0x78, 0x3c, 0x95, 0x22, 0xd2, 0xb1, 0x9d, 0x13, 0x81, 0xc8, + 0x6b, 0x19, 0x72, 0xdf, 0xf0, 0x78, 0x2a, 0xcd, 0x32, 0x0d, 0xa1, 0x6f, 0x12, 0x34, 0x97, 0x69, + 0x88, 0xcd, 0xe4, 0x18, 0xa1, 0xc3, 0xe1, 0x01, 0x22, 0xdf, 0xca, 0x4e, 0x77, 0xcd, 0xe1, 0x46, + 0xa2, 0x57, 0x1b, 0x4f, 0x3d, 0xd5, 0xef, 0x76, 0xde, 0xd6, 0xae, 0x4c, 0xbe, 0xad, 0xb5, 0x6c, + 0x01, 0xe8, 0xd6, 0xd8, 0xfd, 0x85, 0x8e, 0x97, 0x12, 0x27, 0xda, 0xea, 0x43, 0x3d, 0x31, 0xf1, + 0x42, 0x18, 0xdc, 0x81, 0x5a, 0x7c, 0x62, 0x47, 0x6e, 0x30, 0xa1, 0x1d, 0x6a, 0xed, 0x5c, 0x4d, + 0x57, 0x34, 0xd4, 0x7c, 0xb4, 0x22, 0xc1, 0x58, 0x32, 0x09, 0xa9, 0xcb, 0x74, 0xb5, 0xa1, 0x34, + 0x75, 0x47, 0xa4, 0x67, 0x85, 0x23, 0x89, 0x9c, 0x89, 0xab, 0x83, 0x72, 0x85, 0x23, 0x89, 0xf6, + 0xf9, 0x72, 0xa4, 0x6b, 0xe6, 0x0a, 0x27, 0x7a, 0x2e, 0xec, 0x2a, 0x0b, 0x61, 0xe7, 0x25, 0xbe, + 0xf9, 0xbf, 0xcc, 0xf6, 0x9b, 0x02, 0xd4, 0x93, 0x42, 0x8f, 0xe5, 0xc6, 0x1d, 0x89, 0x40, 0xb9, + 0x63, 0x57, 0x44, 0x66, 0xe2, 0x1c, 0x87, 0xdd, 0x81, 0x8a, 0xad, 0x54, 0x94, 0x1c, 0xe2, 0x6f, + 0xe7, 0xbb, 0x84, 0xad, 0x5d, 0x94, 0xf4, 0x02, 0x15, 0xcd, 0xb8, 0x46, 0xad, 0x7e, 0x0c, 0x90, + 0x31, 0xd1, 0xd6, 0x53, 0x31, 0x33, 0x5a, 0x91, 0x64, 0xd7, 0xa0, 0xf2, 0xcc, 0xf6, 0xa6, 0x49, + 0x46, 0xea, 0x8f, 0x7b, 0xc5, 0x8f, 0x0b, 0xd6, 0x9f, 0x8b, 0x50, 0x33, 0x5d, 0x03, 0xbb, 0x0d, + 0x35, 0xea, 0x1a, 0x8c, 0x45, 0x97, 0xa7, 0x5f, 0x02, 0x61, 0xdb, 0x69, 0x3b, 0x94, 0xb3, 0xd1, + 0xa8, 0xd2, 0x6d, 0x91, 0xb1, 0x31, 0x6b, 0x8e, 0x4a, 0x23, 0x31, 0x36, 0x7d, 0x4f, 0x8b, 0xba, + 0x0c, 0x31, 0x76, 0x03, 0x17, 0xfd, 0xc3, 0x51, 0xc4, 0x6e, 0x27, 0xab, 0x2e, 0x93, 
0xc6, 0xb7, + 0xf2, 0x1a, 0x2f, 0x2e, 0xba, 0x0f, 0xcd, 0xdc, 0x34, 0x97, 0xac, 0xfa, 0xfd, 0xfc, 0xaa, 0xcd, + 0x94, 0xa4, 0x4e, 0x37, 0x6d, 0x99, 0x17, 0xfe, 0x0b, 0xff, 0x7d, 0x04, 0x90, 0xa9, 0x7c, 0xf5, + 0xe3, 0xcb, 0xfa, 0x53, 0x09, 0x60, 0x10, 0x62, 0x0d, 0x1c, 0xd9, 0x54, 0xb5, 0x97, 0xdd, 0x49, + 0x20, 0x23, 0xf1, 0x94, 0xd2, 0x9c, 0xc6, 0xd7, 0x79, 0x53, 0xf3, 0x28, 0x63, 0xd8, 0x2e, 0x34, + 0x47, 0x22, 0x76, 0x22, 0x97, 0x02, 0xca, 0x38, 0xfd, 0x06, 0xae, 0x29, 0xd3, 0xb3, 0xd5, 0xcd, + 0x10, 0xda, 0x57, 0xf9, 0x31, 0x6c, 0x07, 0x96, 0xc5, 0x79, 0x28, 0x23, 0x65, 0x66, 0xd1, 0xcd, + 0xe5, 0x15, 0xdd, 0xa6, 0x22, 0x9f, 0x66, 0xe2, 0x4d, 0x91, 0x7d, 0x30, 0x1b, 0xca, 0x8e, 0x1d, + 0xc6, 0xa6, 0xa4, 0x77, 0x16, 0xe6, 0xdb, 0xb7, 0x43, 0xed, 0xb4, 0xbd, 0x0f, 0x71, 0xad, 0xbf, + 0xfa, 0xdb, 0x8d, 0x5b, 0xb9, 0x3e, 0xc8, 0x97, 0xc7, 0xb3, 0x6d, 0x8a, 0x97, 0x53, 0x57, 0x6d, + 0x4f, 0x95, 0xeb, 0x6d, 0xdb, 0xa1, 0x8b, 0xea, 0x70, 0x60, 0xbf, 0xcb, 0x49, 0x35, 0xfb, 0x18, + 0x5a, 0x61, 0x24, 0x27, 0x91, 0x88, 0xe3, 0xa7, 0x54, 0x15, 0x4d, 0xb7, 0xfa, 0x86, 0xa9, 0xde, + 0x24, 0xf9, 0x14, 0x05, 0x7c, 0x25, 0xcc, 0x7f, 0xae, 0xfe, 0x04, 0xda, 0x8b, 0x2b, 0x7e, 0x9d, + 0xdd, 0x5b, 0xbd, 0x0b, 0x8d, 0x74, 0x05, 0x2f, 0x1b, 0x58, 0xcf, 0x6f, 0xfb, 0x1f, 0x0b, 0x50, + 0xd5, 0xf9, 0xc8, 0xee, 0x42, 0xc3, 0x93, 0x8e, 0x8d, 0x06, 0x24, 0x37, 0x83, 0x77, 0xb2, 0x74, + 0xdd, 0xfa, 0x3c, 0x91, 0xe9, 0xfd, 0xc8, 0xb0, 0x18, 0x9e, 0x6e, 0x30, 0x96, 0x49, 0xfe, 0xb4, + 0xb2, 0x41, 0xfd, 0x60, 0x2c, 0xb9, 0x16, 0xae, 0x3e, 0x80, 0xd6, 0xbc, 0x8a, 0x4b, 0xec, 0x7c, + 0x6f, 0x3e, 0xd0, 0xa9, 0x1a, 0xa4, 0x83, 0xf2, 0x66, 0xdf, 0x85, 0x46, 0xca, 0x67, 0x9b, 0x17, + 0x0d, 0x5f, 0xce, 0x8f, 0xcc, 0xd9, 0x6a, 0xfd, 0xba, 0x00, 0x90, 0xd9, 0x86, 0xe7, 0x1c, 0xde, + 0x41, 0x82, 0xac, 0x7b, 0x48, 0xbf, 0xa9, 0xf8, 0xda, 0xca, 0x26, 0x5b, 0x96, 0x39, 0xd1, 0x6c, + 0x0b, 0x60, 0x94, 0xe6, 0xfa, 0x0b, 0x4e, 0x80, 0x1c, 0x02, 0xf5, 0x7b, 0x76, 0x30, 0x99, 0xda, + 0x13, 0x61, 0x5a, 0xbc, 
0xf4, 0xdb, 0x1a, 0x40, 0x3d, 0xb1, 0x90, 0xad, 0x43, 0x33, 0x36, 0x56, + 0x61, 0x1b, 0x8d, 0xa6, 0x54, 0x78, 0x9e, 0x85, 0xed, 0x70, 0x64, 0x07, 0x13, 0x31, 0xd7, 0x0e, + 0x73, 0xe4, 0x70, 0x23, 0xb0, 0xbe, 0x80, 0x0a, 0x31, 0x30, 0x7b, 0x63, 0x65, 0x47, 0xca, 0x74, + 0xd6, 0xba, 0x79, 0x94, 0x31, 0x99, 0xb4, 0x57, 0xc6, 0xf8, 0xe6, 0x1a, 0xc0, 0xde, 0xc7, 0x16, + 0x75, 0x64, 0xdc, 0x7d, 0x19, 0x0e, 0xc5, 0xd6, 0x27, 0x50, 0x4f, 0xd8, 0xe8, 0x15, 0xcf, 0x0d, + 0x84, 0x31, 0x91, 0x68, 0xbc, 0x91, 0x38, 0x27, 0x76, 0x64, 0x3b, 0x4a, 0xe8, 0x1e, 0xa6, 0xc2, + 0x33, 0x86, 0xf5, 0x1e, 0x34, 0x73, 0x49, 0x89, 0xb1, 0xf8, 0x84, 0xf6, 0x58, 0x1f, 0x0d, 0xfa, + 0xc3, 0xfa, 0x14, 0x56, 0xe6, 0x12, 0x04, 0x2b, 0x99, 0x3b, 0x4a, 0x2a, 0x99, 0xae, 0x52, 0x17, + 0x5a, 0x31, 0x06, 0xe5, 0x33, 0x61, 0x9f, 0x9a, 0x36, 0x8c, 0x68, 0xeb, 0x0f, 0x78, 0xf1, 0x4a, + 0xda, 0xe3, 0x1f, 0x02, 0x9c, 0x28, 0x15, 0x3e, 0xa5, 0x7e, 0xd9, 0x28, 0x6b, 0x20, 0x87, 0x10, + 0xec, 0x06, 0x34, 0xf1, 0x23, 0x36, 0x72, 0xad, 0x9a, 0x46, 0xc4, 0x1a, 0xf0, 0x03, 0x68, 0x8c, + 0xd3, 0xe1, 0x25, 0x13, 0x1f, 0xc9, 0xe8, 0x77, 0xa0, 0x1e, 0x48, 0x23, 0xd3, 0x7b, 0x5b, 0x0b, + 0x64, 0x3a, 0xce, 0xf6, 0x3c, 0x23, 0xab, 0xe8, 0x71, 0xb6, 0xe7, 0x91, 0xd0, 0xba, 0x05, 0x6f, + 0x5c, 0xb8, 0x42, 0xb2, 0xb7, 0xa0, 0x3a, 0x76, 0x3d, 0x45, 0x15, 0x0b, 0xaf, 0x0b, 0xe6, 0xcb, + 0xfa, 0x57, 0x01, 0x20, 0x8b, 0x2d, 0x4c, 0x19, 0x2c, 0x3d, 0x88, 0x59, 0xd6, 0xa5, 0xc6, 0x83, + 0xba, 0x6f, 0x0e, 0x31, 0x13, 0x19, 0xd7, 0xe7, 0xe3, 0x71, 0x2b, 0x39, 0xe3, 0xf4, 0xf1, 0xb6, + 0x63, 0x8e, 0xb7, 0xd7, 0xb9, 0xe6, 0xa5, 0x33, 0x50, 0x17, 0x96, 0xbf, 0xf5, 0x43, 0x96, 0xeb, + 0xdc, 0x48, 0x56, 0x1f, 0xc0, 0xca, 0xdc, 0x94, 0xaf, 0x58, 0xd0, 0xb2, 0xc3, 0x38, 0x9f, 0xe8, + 0x3b, 0x50, 0xd5, 0xcf, 0x05, 0x6c, 0x03, 0x6a, 0xb6, 0xa3, 0x73, 0x3c, 0x77, 0xce, 0xa0, 0x70, + 0x97, 0xd8, 0x3c, 0x11, 0x5b, 0x7f, 0x29, 0x02, 0x64, 0xfc, 0xd7, 0x68, 0xc5, 0xef, 0x41, 0x2b, + 0x16, 0x8e, 0x0c, 0x46, 0x76, 0x34, 0x23, 0xa9, 0xb9, 0xcf, 
0x5e, 0x36, 0x64, 0x01, 0x99, 0x6b, + 0xcb, 0x4b, 0x2f, 0x6f, 0xcb, 0x37, 0xa0, 0xec, 0xc8, 0x70, 0x66, 0xea, 0x16, 0x9b, 0x5f, 0xc8, + 0xbe, 0x0c, 0x67, 0x07, 0x4b, 0x9c, 0x10, 0x6c, 0x0b, 0xaa, 0xfe, 0x29, 0x3d, 0xa0, 0xe8, 0x8b, + 0xe0, 0xb5, 0x79, 0xec, 0xc3, 0x53, 0xa4, 0x0f, 0x96, 0xb8, 0x41, 0xb1, 0x5b, 0x50, 0xf1, 0x4f, + 0x47, 0x6e, 0x64, 0x2a, 0xcf, 0xd5, 0x45, 0x78, 0xd7, 0x8d, 0xe8, 0xbd, 0x04, 0x31, 0xcc, 0x82, + 0x62, 0xe4, 0x9b, 0xd7, 0x92, 0xf6, 0x82, 0x37, 0xfd, 0x83, 0x25, 0x5e, 0x8c, 0xfc, 0xbd, 0x3a, + 0x54, 0xb5, 0x5f, 0xad, 0x7f, 0x96, 0xa0, 0x35, 0x6f, 0x25, 0xee, 0x6c, 0x1c, 0x39, 0xc9, 0xce, + 0xc6, 0x91, 0x93, 0xde, 0x58, 0x8a, 0xb9, 0x1b, 0x8b, 0x05, 0x15, 0x79, 0x16, 0x88, 0x28, 0xff, + 0x52, 0xb4, 0x7f, 0x22, 0xcf, 0x02, 0xec, 0x9a, 0xb5, 0x68, 0xae, 0x09, 0xad, 0x98, 0x26, 0xf4, + 0x7d, 0x58, 0x19, 0x4b, 0xcf, 0x93, 0x67, 0xc3, 0x99, 0xef, 0xb9, 0xc1, 0xa9, 0xe9, 0x44, 0xe7, + 0x99, 0x6c, 0x03, 0xae, 0x8c, 0xdc, 0x08, 0xcd, 0xd9, 0x97, 0x81, 0x12, 0x01, 0xdd, 0x83, 0x11, + 0xb7, 0xc8, 0x66, 0x9f, 0xc1, 0xba, 0xad, 0x94, 0xf0, 0x43, 0xf5, 0x38, 0x08, 0x6d, 0xe7, 0xb4, + 0x2b, 0x1d, 0xca, 0x42, 0x3f, 0xb4, 0x95, 0x7b, 0xec, 0x7a, 0xae, 0x9a, 0x91, 0x33, 0xea, 0xfc, + 0xa5, 0x38, 0xf6, 0x01, 0xb4, 0x9c, 0x48, 0xd8, 0x4a, 0x74, 0x45, 0xac, 0x8e, 0x6c, 0x75, 0xd2, + 0xa9, 0xd3, 0xc8, 0x05, 0x2e, 0xae, 0xc1, 0x46, 0x6b, 0xbf, 0x70, 0xbd, 0x91, 0x83, 0x77, 0xcf, + 0x86, 0x5e, 0xc3, 0x1c, 0x93, 0x6d, 0x01, 0x23, 0x46, 0xcf, 0x0f, 0xd5, 0x2c, 0x85, 0x02, 0x41, + 0x2f, 0x91, 0xe0, 0x81, 0xab, 0x5c, 0x5f, 0xc4, 0xca, 0xf6, 0x43, 0xba, 0x56, 0x97, 0x78, 0xc6, + 0x60, 0x37, 0xa1, 0xed, 0x06, 0x8e, 0x37, 0x1d, 0x89, 0xa7, 0x21, 0x2e, 0x24, 0x0a, 0xe2, 0xce, + 0x32, 0x9d, 0x2a, 0x57, 0x0c, 0xff, 0xc8, 0xb0, 0x11, 0x2a, 0xce, 0x17, 0xa0, 0x2b, 0x1a, 0x6a, + 0xf8, 0x09, 0xd4, 0xfa, 0xb2, 0x00, 0xed, 0xc5, 0xc0, 0xc3, 0x6d, 0x0b, 0x71, 0xf1, 0xe6, 0xe6, + 0x8d, 0x74, 0xba, 0x95, 0xc5, 0xdc, 0x56, 0x26, 0xb5, 0xb4, 0x94, 0xab, 0xa5, 0x69, 0x58, 0x94, + 
0x5f, 0x1c, 0x16, 0x73, 0x0b, 0xad, 0x2c, 0x2c, 0xd4, 0xfa, 0x7d, 0x01, 0xae, 0x2c, 0x04, 0xf7, + 0x2b, 0x5b, 0xb4, 0x0e, 0x4d, 0xdf, 0x3e, 0x15, 0xfa, 0xdd, 0x22, 0x36, 0x25, 0x24, 0xcf, 0xfa, + 0x1f, 0xd8, 0x17, 0xc0, 0x72, 0x3e, 0xa3, 0x2e, 0xb5, 0x2d, 0x09, 0x90, 0x43, 0xa9, 0xee, 0xcb, + 0xa9, 0xa9, 0xc5, 0x49, 0x80, 0x24, 0xcc, 0x8b, 0x61, 0x54, 0xba, 0x24, 0x8c, 0xac, 0x43, 0xa8, + 0x27, 0x06, 0xb2, 0x1b, 0xe6, 0x61, 0xa9, 0x90, 0xbd, 0x97, 0x3e, 0x8e, 0x45, 0x84, 0xb6, 0xeb, + 0x57, 0xa6, 0x77, 0xa1, 0xa2, 0x7b, 0xd4, 0xe2, 0x45, 0x84, 0x96, 0x58, 0x43, 0xa8, 0x19, 0x0e, + 0xdb, 0x84, 0xea, 0xf1, 0x2c, 0x7d, 0x64, 0x31, 0xc7, 0x05, 0x7e, 0x8f, 0x0c, 0x02, 0xcf, 0x20, + 0x8d, 0x60, 0xd7, 0xa0, 0x7c, 0x3c, 0xeb, 0x77, 0xf5, 0xad, 0x13, 0x4f, 0x32, 0xfc, 0xda, 0xab, + 0x6a, 0x83, 0xac, 0xcf, 0x61, 0x39, 0x3f, 0x2e, 0x2d, 0xec, 0x85, 0x5c, 0x61, 0x4f, 0x8f, 0xec, + 0xe2, 0xcb, 0xae, 0x1f, 0x1f, 0x01, 0xd0, 0x33, 0xf0, 0xeb, 0x5e, 0x5b, 0x7e, 0x0c, 0x35, 0xf3, + 0x7c, 0xcc, 0x3e, 0x58, 0x78, 0x0e, 0x6f, 0xa5, 0x6f, 0xcb, 0x73, 0x6f, 0xe2, 0xd6, 0x3d, 0x6c, + 0x60, 0xcf, 0x44, 0xd4, 0x75, 0xc7, 0xe3, 0xd7, 0x9d, 0xee, 0x1e, 0xb4, 0x1e, 0x87, 0xe1, 0x7f, + 0x36, 0xf6, 0xe7, 0x50, 0xd5, 0xaf, 0xd8, 0x38, 0xc6, 0x43, 0x0b, 0xcc, 0x1e, 0x30, 0xdd, 0xe4, + 0xe6, 0x4d, 0xe2, 0x1a, 0x80, 0xc8, 0x29, 0xce, 0x67, 0x36, 0x97, 0x90, 0xf3, 0x06, 0x70, 0x0d, + 0xd8, 0xdc, 0x80, 0x9a, 0x79, 0x30, 0x65, 0x0d, 0xa8, 0x3c, 0x3e, 0x1c, 0xf6, 0x1e, 0xb5, 0x97, + 0x58, 0x1d, 0xca, 0x07, 0x83, 0xe1, 0xa3, 0x76, 0x01, 0xa9, 0xc3, 0xc1, 0x61, 0xaf, 0x5d, 0xdc, + 0xbc, 0x09, 0xcb, 0xf9, 0x27, 0x53, 0xd6, 0x84, 0xda, 0x70, 0xf7, 0xb0, 0xbb, 0x37, 0xf8, 0x59, + 0x7b, 0x89, 0x2d, 0x43, 0xbd, 0x7f, 0x38, 0xec, 0xed, 0x3f, 0xe6, 0xbd, 0x76, 0x61, 0xf3, 0xa7, + 0xd0, 0x48, 0x5f, 0x91, 0x50, 0xc3, 0x5e, 0xff, 0xb0, 0xdb, 0x5e, 0x62, 0x00, 0xd5, 0x61, 0x6f, + 0x9f, 0xf7, 0x50, 0x6f, 0x0d, 0x4a, 0xc3, 0xe1, 0x41, 0xbb, 0x88, 0xb3, 0xee, 0xef, 0xee, 0x1f, + 0xf4, 0xda, 0x25, 0x24, 0x1f, 0x3d, 
0x3c, 0xba, 0x3f, 0x6c, 0x97, 0x37, 0x3f, 0x82, 0x2b, 0x0b, + 0xef, 0x2b, 0x34, 0xfa, 0x60, 0x97, 0xf7, 0x50, 0x53, 0x13, 0x6a, 0x47, 0xbc, 0xff, 0x64, 0xf7, + 0x51, 0xaf, 0x5d, 0x40, 0xc1, 0xe7, 0x83, 0xfd, 0x07, 0xbd, 0x6e, 0xbb, 0xb8, 0x77, 0xfd, 0xab, + 0xe7, 0x6b, 0x85, 0x6f, 0x9e, 0xaf, 0x15, 0xbe, 0x7d, 0xbe, 0x56, 0xf8, 0xfb, 0xf3, 0xb5, 0xc2, + 0x97, 0xdf, 0xaf, 0x2d, 0x7d, 0xf3, 0xfd, 0xda, 0xd2, 0xb7, 0xdf, 0xaf, 0x2d, 0x1d, 0x57, 0xe9, + 0x7f, 0x90, 0x0f, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xd5, 0x3c, 0x38, 0x7a, 0x47, 0x19, 0x00, + 0x00, } func (m *Op) Marshal() (dAtA []byte, err error) { @@ -4323,6 +4332,13 @@ func (m *SourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Language) > 0 { + i -= len(m.Language) + copy(dAtA[i:], m.Language) + i = encodeVarintOps(dAtA, i, uint64(len(m.Language))) + i-- + dAtA[i] = 0x22 + } if m.Definition != nil { { size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) @@ -6097,6 +6113,10 @@ func (m *SourceInfo) Size() (n int) { l = m.Definition.Size() n += 1 + l + sovOps(uint64(l)) } + l = len(m.Language) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } return n } @@ -10512,6 +10532,38 @@ func (m *SourceInfo) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Language", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Language = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skipOps(dAtA[iNdEx:]) diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto index 87cb771902..4788c093b7 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.proto +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.proto @@ -228,6 +228,7 @@ message SourceInfo { string filename = 1; bytes data = 2; Definition definition = 3; + string language = 4; } // Location defines list of areas in to source file diff --git a/vendor/github.com/moby/buildkit/solver/result/attestation.go b/vendor/github.com/moby/buildkit/solver/result/attestation.go index 77af74da19..2fee278240 100644 --- a/vendor/github.com/moby/buildkit/solver/result/attestation.go +++ b/vendor/github.com/moby/buildkit/solver/result/attestation.go @@ -1,8 +1,6 @@ package result import ( - "reflect" - pb "github.com/moby/buildkit/frontend/gateway/pb" digest "github.com/opencontainers/go-digest" ) @@ -58,9 +56,11 @@ func FromDigestMap(m map[string]string) []digest.Digest { return ds } -func ConvertAttestation[U any, V any](a *Attestation[U], fn func(U) (V, error)) (*Attestation[V], error) { +func ConvertAttestation[U comparable, V comparable](a *Attestation[U], fn func(U) (V, error)) (*Attestation[V], error) { + var zero U + var ref V - if reflect.ValueOf(a.Ref).IsValid() { + if a.Ref != zero { var err error ref, err = fn(a.Ref) if err != nil { diff --git a/vendor/github.com/moby/buildkit/solver/result/result.go b/vendor/github.com/moby/buildkit/solver/result/result.go index d5fe2d03cf..cfcfe9dcbd 100644 --- a/vendor/github.com/moby/buildkit/solver/result/result.go +++ b/vendor/github.com/moby/buildkit/solver/result/result.go @@ -1,13 +1,12 @@ package result import ( - "reflect" "sync" "github.com/pkg/errors" ) -type Result[T any] struct { +type Result[T comparable] struct { mu sync.Mutex Ref T Refs map[string]T @@ -50,7 +49,8 @@ func (r *Result[T]) SingleRef() (T, error) { r.mu.Lock() defer r.mu.Unlock() - if r.Refs != nil && 
!reflect.ValueOf(r.Ref).IsValid() { + var zero T + if r.Refs != nil && r.Ref == zero { var t T return t, errors.Errorf("invalid map result") } @@ -77,11 +77,12 @@ func (r *Result[T]) FindRef(key string) (T, bool) { } func (r *Result[T]) EachRef(fn func(T) error) (err error) { - if reflect.ValueOf(r.Ref).IsValid() { + var zero T + if r.Ref != zero { err = fn(r.Ref) } for _, r := range r.Refs { - if reflect.ValueOf(r).IsValid() { + if r != zero { if err1 := fn(r); err1 != nil && err == nil { err = err1 } @@ -89,7 +90,7 @@ func (r *Result[T]) EachRef(fn func(T) error) (err error) { } for _, as := range r.Attestations { for _, a := range as { - if reflect.ValueOf(a.Ref).IsValid() { + if a.Ref != zero { if err1 := fn(a.Ref); err1 != nil && err == nil { err = err1 } @@ -102,8 +103,12 @@ func (r *Result[T]) EachRef(fn func(T) error) (err error) { // EachRef iterates over references in both a and b. // a and b are assumed to be of the same size and map their references // to the same set of keys -func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err error) { - if reflect.ValueOf(a.Ref).IsValid() && reflect.ValueOf(b.Ref).IsValid() { +func EachRef[U comparable, V comparable](a *Result[U], b *Result[V], fn func(U, V) error) (err error) { + var ( + zeroU U + zeroV V + ) + if a.Ref != zeroU && b.Ref != zeroV { err = fn(a.Ref, b.Ref) } for k, r := range a.Refs { @@ -111,7 +116,7 @@ func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err if !ok { continue } - if reflect.ValueOf(r).IsValid() && reflect.ValueOf(r2).IsValid() { + if r != zeroU && r2 != zeroV { if err1 := fn(r, r2); err1 != nil && err == nil { err = err1 } @@ -127,7 +132,7 @@ func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err break } att2 := atts2[i] - if reflect.ValueOf(att.Ref).IsValid() && reflect.ValueOf(att2.Ref).IsValid() { + if att.Ref != zeroU && att2.Ref != zeroV { if err1 := fn(att.Ref, att2.Ref); err1 != nil && err == nil { 
err = err1 } @@ -137,11 +142,13 @@ func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err return err } -func ConvertResult[U any, V any](r *Result[U], fn func(U) (V, error)) (*Result[V], error) { +func ConvertResult[U comparable, V comparable](r *Result[U], fn func(U) (V, error)) (*Result[V], error) { + var zero U + r2 := &Result[V]{} var err error - if reflect.ValueOf(r.Ref).IsValid() { + if r.Ref != zero { r2.Ref, err = fn(r.Ref) if err != nil { return nil, err @@ -152,7 +159,7 @@ func ConvertResult[U any, V any](r *Result[U], fn func(U) (V, error)) (*Result[V r2.Refs = map[string]V{} } for k, r := range r.Refs { - if !reflect.ValueOf(r).IsValid() { + if r == zero { continue } r2.Refs[k], err = fn(r) diff --git a/vendor/github.com/moby/buildkit/solver/types.go b/vendor/github.com/moby/buildkit/solver/types.go index 6635daef0e..01b344a3af 100644 --- a/vendor/github.com/moby/buildkit/solver/types.go +++ b/vendor/github.com/moby/buildkit/solver/types.go @@ -241,7 +241,7 @@ type CacheManager interface { // Query searches for cache paths from one cache key to the output of a // possible match. Query(inp []CacheKeyWithSelector, inputIndex Index, dgst digest.Digest, outputIndex Index) ([]*CacheKey, error) - Records(ck *CacheKey) ([]*CacheRecord, error) + Records(ctx context.Context, ck *CacheKey) ([]*CacheRecord, error) // Load loads a cache record into a result reference. 
Load(ctx context.Context, rec *CacheRecord) (Result, error) diff --git a/vendor/github.com/moby/buildkit/source/containerimage/pull.go b/vendor/github.com/moby/buildkit/source/containerimage/pull.go index 509d2a9946..8792111aaa 100644 --- a/vendor/github.com/moby/buildkit/source/containerimage/pull.go +++ b/vendor/github.com/moby/buildkit/source/containerimage/pull.go @@ -60,9 +60,15 @@ type SourceOpt struct { LeaseManager leases.Manager } +type resolveImageResult struct { + ref string + dgst digest.Digest + dt []byte +} + type Source struct { SourceOpt - g flightcontrol.Group + g flightcontrol.Group[*resolveImageResult] } var _ source.Source = &Source{} @@ -82,12 +88,7 @@ func (is *Source) ID() string { return srctypes.DockerImageScheme } -func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) { - type t struct { - dgst digest.Digest - dt []byte - } - var typed *t +func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) { key := ref if platform := opt.Platform; platform != nil { key += platforms.Format(*platform) @@ -102,7 +103,7 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re case ResolverTypeRegistry: rm, err = source.ParseImageResolveMode(opt.ResolveMode) if err != nil { - return "", nil, err + return "", "", nil, err } rslvr = resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g).WithImageStore(is.ImageStore, rm) case ResolverTypeOCILayout: @@ -110,18 +111,17 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re rslvr = getOCILayoutResolver(opt.Store, sm, g) } key += rm.String() - res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) { - dgst, dt, err := imageutil.Config(ctx, ref, rslvr, is.ContentStore, is.LeaseManager, 
opt.Platform) + res, err := is.g.Do(ctx, key, func(ctx context.Context) (*resolveImageResult, error) { + newRef, dgst, dt, err := imageutil.Config(ctx, ref, rslvr, is.ContentStore, is.LeaseManager, opt.Platform, opt.SourcePolicies) if err != nil { return nil, err } - return &t{dgst: dgst, dt: dt}, nil + return &resolveImageResult{dgst: dgst, dt: dt, ref: newRef}, nil }) if err != nil { - return "", nil, err + return "", "", nil, err } - typed = res.(*t) - return typed.dgst, typed.dt, nil + return res.ref, res.dgst, res.dt, nil } func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) { @@ -205,7 +205,7 @@ type puller struct { ResolverType store llb.ResolveImageConfigOptStore - g flightcontrol.Group + g flightcontrol.Group[struct{}] cacheKeyErr error cacheKeyDone bool releaseTmpLeases func(context.Context) error @@ -255,9 +255,9 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach // be canceled before the progress output is complete progressFactory := progress.FromContext(ctx) - _, err = p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) { + _, err = p.g.Do(ctx, "", func(ctx context.Context) (_ struct{}, err error) { if p.cacheKeyErr != nil || p.cacheKeyDone { - return nil, p.cacheKeyErr + return struct{}{}, p.cacheKeyErr } defer func() { if !errdefs.IsCanceled(ctx, err) { @@ -266,7 +266,7 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach }() ctx, done, err := leaseutil.WithLease(ctx, p.LeaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) if err != nil { - return nil, err + return struct{}{}, err } p.releaseTmpLeases = done defer imageutil.AddLease(done) @@ -278,12 +278,12 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach p.manifest, err = p.PullManifests(ctx, getResolver) if err != nil { - return nil, err + return struct{}{}, err } if ll := 
p.layerLimit; ll != nil { if *ll > len(p.manifest.Descriptors) { - return nil, errors.Errorf("layer limit %d is greater than the number of layers in the image %d", *ll, len(p.manifest.Descriptors)) + return struct{}{}, errors.Errorf("layer limit %d is greater than the number of layers in the image %d", *ll, len(p.manifest.Descriptors)) } p.manifest.Descriptors = p.manifest.Descriptors[:*ll] } @@ -320,21 +320,21 @@ func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cach desc := p.manifest.MainManifestDesc k, err := mainManifestKey(ctx, desc, p.Platform, p.layerLimit) if err != nil { - return nil, err + return struct{}{}, err } p.manifestKey = k.String() dt, err := content.ReadBlob(ctx, p.ContentStore, p.manifest.ConfigDesc) if err != nil { - return nil, err + return struct{}{}, err } ck, err := cacheKeyFromConfig(dt, p.layerLimit) if err != nil { - return nil, err + return struct{}{}, err } p.configKey = ck.String() p.cacheKeyDone = true - return nil, nil + return struct{}{}, nil }) if err != nil { return "", "", nil, false, err diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource.go b/vendor/github.com/moby/buildkit/source/git/gitsource.go index dd35fe55f7..fdc1b50028 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource.go @@ -671,13 +671,14 @@ func git(ctx context.Context, dir, sshAuthSock, knownHosts string, args ...strin // "GIT_TRACE=1", "GIT_CONFIG_NOSYSTEM=1", // Disable reading from system gitconfig. "HOME=/dev/null", // Disable reading from user gitconfig. + "LC_ALL=C", // Ensure consistent output. 
} if sshAuthSock != "" { cmd.Env = append(cmd.Env, "SSH_AUTH_SOCK="+sshAuthSock) } // remote git commands spawn helper processes that inherit FDs and don't // handle parent death signal so exec.CommandContext can't be used - err := runProcessGroup(ctx, cmd) + err := runWithStandardUmask(ctx, cmd) if err != nil { if strings.Contains(errbuf.String(), "--depth") || strings.Contains(errbuf.String(), "shallow") { if newArgs := argsNoDepth(args); len(args) > len(newArgs) { diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go index cb49917573..142ae56091 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go @@ -5,80 +5,43 @@ package git import ( "context" - "os" "os/exec" - "os/signal" + "runtime" "syscall" "time" - "github.com/docker/docker/pkg/reexec" "golang.org/x/sys/unix" ) -const ( - gitCmd = "umask-git" -) +func runWithStandardUmask(ctx context.Context, cmd *exec.Cmd) error { + errCh := make(chan error) -func init() { - reexec.Register(gitCmd, gitMain) + go func() { + defer close(errCh) + runtime.LockOSThread() + + if err := unshareAndRun(ctx, cmd); err != nil { + errCh <- err + } + }() + + return <-errCh } -func gitMain() { - // Need standard user umask for git process. - unix.Umask(0022) +// unshareAndRun needs to be called in a locked thread. +func unshareAndRun(ctx context.Context, cmd *exec.Cmd) error { + if err := syscall.Unshare(syscall.CLONE_FS); err != nil { + return err + } + syscall.Umask(0022) + return runProcessGroup(ctx, cmd) +} - // Reexec git command - cmd := exec.Command(os.Args[1], os.Args[2:]...) 
//nolint:gosec // reexec +func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { cmd.SysProcAttr = &unix.SysProcAttr{ Setpgid: true, Pdeathsig: unix.SIGTERM, } - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Stdin = os.Stdin - - // Forward all signals - sigc := make(chan os.Signal, 1) - done := make(chan struct{}) - signal.Notify(sigc) - go func() { - for { - select { - case sig := <-sigc: - if cmd.Process == nil { - continue - } - switch sig { - case unix.SIGINT, unix.SIGTERM, unix.SIGKILL: - _ = unix.Kill(-cmd.Process.Pid, sig.(unix.Signal)) - default: - _ = cmd.Process.Signal(sig) - } - case <-done: - return - } - } - }() - - err := cmd.Run() - close(done) - if err != nil { - if exiterr, ok := err.(*exec.ExitError); ok { - switch status := exiterr.Sys().(type) { - case unix.WaitStatus: - os.Exit(status.ExitStatus()) - case syscall.WaitStatus: - os.Exit(status.ExitStatus()) - } - } - os.Exit(1) - } - os.Exit(0) -} - -func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { - cmd.Path = reexec.Self() - cmd.Args = append([]string{gitCmd}, cmd.Args...) 
if err := cmd.Start(); err != nil { return err } diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go index a1952ecb0c..8c8a1d3dcf 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go @@ -8,7 +8,7 @@ import ( "os/exec" ) -func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { +func runWithStandardUmask(ctx context.Context, cmd *exec.Cmd) error { if err := cmd.Start(); err != nil { return err } diff --git a/vendor/github.com/moby/buildkit/source/http/httpsource.go b/vendor/github.com/moby/buildkit/source/http/httpsource.go index 968c635651..9fde3cdee7 100644 --- a/vendor/github.com/moby/buildkit/source/http/httpsource.go +++ b/vendor/github.com/moby/buildkit/source/http/httpsource.go @@ -178,6 +178,9 @@ func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, inde // manual ETag value comparison. if len(m) > 0 { req.Method = "HEAD" + // we need to add accept-encoding header manually because stdlib only adds it to GET requests + // some servers will return different etags if Accept-Encoding header is different + req.Header.Add("Accept-Encoding", "gzip") resp, err := client.Do(req) if err == nil { if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotModified { @@ -202,6 +205,10 @@ func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, inde resp.Body.Close() } req.Method = "GET" + // Unset explicit Accept-Encoding for GET, otherwise the go http library will not + // transparently decompress the response body when it is gzipped. It will still add + // this header implicitly when the request is made though. 
+ req.Header.Del("Accept-Encoding") } resp, err := client.Do(req) @@ -392,6 +399,9 @@ func (hs *httpSourceHandler) Snapshot(ctx context.Context, g session.Group) (cac if err != nil { return nil, err } + defer func() { + _ = resp.Body.Close() + }() ref, dgst, err := hs.save(ctx, resp, g) if err != nil { diff --git a/vendor/github.com/moby/buildkit/sourcepolicy/engine.go b/vendor/github.com/moby/buildkit/sourcepolicy/engine.go index 829e851065..8515b276a4 100644 --- a/vendor/github.com/moby/buildkit/sourcepolicy/engine.go +++ b/vendor/github.com/moby/buildkit/sourcepolicy/engine.go @@ -7,6 +7,7 @@ import ( spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/bklog" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) var ( @@ -112,12 +113,20 @@ func (e *Engine) evaluatePolicies(ctx context.Context, srcOp *pb.SourceOp) (bool // // For Allow/Deny rules, the last matching rule wins. // E.g. `ALLOW foo; DENY foo` will deny `foo`, `DENY foo; ALLOW foo` will allow `foo`. 
-func (e *Engine) evaluatePolicy(ctx context.Context, pol *spb.Policy, srcOp *pb.SourceOp) (bool, error) { +func (e *Engine) evaluatePolicy(ctx context.Context, pol *spb.Policy, srcOp *pb.SourceOp) (retMut bool, retErr error) { ident := srcOp.GetIdentifier() - ctx = bklog.WithLogger(ctx, bklog.G(ctx).WithFields(map[string]interface{}{ - "ref": ident, - })) + ctx = bklog.WithLogger(ctx, bklog.G(ctx).WithField("ref", ident)) + defer func() { + if retMut || retErr != nil { + bklog.G(ctx).WithFields( + logrus.Fields{ + "mutated": retMut, + "updated": srcOp.GetIdentifier(), + logrus.ErrorKey: retErr, + }).Debug("Evaluated source policy") + } + }() var deny bool for _, rule := range pol.Rules { diff --git a/vendor/github.com/moby/buildkit/util/archutil/Dockerfile b/vendor/github.com/moby/buildkit/util/archutil/Dockerfile index 9f8e59d9db..2b24b230b3 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/Dockerfile +++ b/vendor/github.com/moby/buildkit/util/archutil/Dockerfile @@ -52,7 +52,7 @@ FROM base AS exit-mips64 COPY fixtures/exit.mips64.s . 
RUN mips64-linux-gnuabi64-as --noexecstack -o exit.o exit.mips64.s && mips64-linux-gnuabi64-ld -o exit -s exit.o -FROM golang:1.19-alpine AS generate +FROM golang:1.20-alpine AS generate WORKDIR /src COPY --from=exit-amd64 /src/exit amd64 COPY --from=exit-386 /src/exit 386 diff --git a/vendor/github.com/moby/buildkit/util/archutil/detect.go b/vendor/github.com/moby/buildkit/util/archutil/detect.go index 3184f9e548..7826441271 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/detect.go +++ b/vendor/github.com/moby/buildkit/util/archutil/detect.go @@ -6,8 +6,8 @@ import ( "sync" "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/util/bklog" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" ) var mu sync.Mutex @@ -181,10 +181,10 @@ func amd64vector(v string) (out []string) { func printPlatformWarning(p ocispecs.Platform, err error) { if strings.Contains(err.Error(), "exec format error") { - logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", platforms.Format(p)) + bklog.L.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", platforms.Format(p)) } else if strings.Contains(err.Error(), "no such file or directory") { - logrus.Warnf("platforms %s cannot pass the validation, '-F' flag might have not set for 'archutil'.", platforms.Format(p)) + bklog.L.Warnf("platforms %s cannot pass the validation, '-F' flag might have not set for 'archutil'.", platforms.Format(p)) } else { - logrus.Warnf("platforms %s cannot pass the validation: %s", platforms.Format(p), err.Error()) + bklog.L.Warnf("platforms %s cannot pass the validation: %s", platforms.Format(p), err.Error()) } } diff --git a/vendor/github.com/moby/buildkit/util/attestation/types.go b/vendor/github.com/moby/buildkit/util/attestation/types.go index 35f4404cd6..accccd307e 100644 --- 
a/vendor/github.com/moby/buildkit/util/attestation/types.go +++ b/vendor/github.com/moby/buildkit/util/attestation/types.go @@ -1,8 +1,6 @@ package attestation const ( - MediaTypeDockerSchema2AttestationType = "application/vnd.in-toto+json" - DockerAnnotationReferenceType = "vnd.docker.reference.type" DockerAnnotationReferenceDigest = "vnd.docker.reference.digest" DockerAnnotationReferenceDescription = "vnd.docker.reference.description" diff --git a/vendor/github.com/moby/buildkit/util/bklog/log.go b/vendor/github.com/moby/buildkit/util/bklog/log.go index d7f202210d..7d0b1d90df 100644 --- a/vendor/github.com/moby/buildkit/util/bklog/log.go +++ b/vendor/github.com/moby/buildkit/util/bklog/log.go @@ -2,6 +2,7 @@ package bklog import ( "context" + "runtime/debug" "github.com/containerd/containerd/log" "github.com/sirupsen/logrus" @@ -61,3 +62,15 @@ func GetLogger(ctx context.Context) (l *logrus.Entry) { return l } + +// LazyStackTrace lets you include a stack trace as a field's value in a log but only +// call it when the log level is actually enabled. +type LazyStackTrace struct{} + +func (LazyStackTrace) String() string { + return string(debug.Stack()) +} + +func (LazyStackTrace) MarshalText() ([]byte, error) { + return debug.Stack(), nil +} diff --git a/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go b/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go deleted file mode 100644 index e3486e8e4f..0000000000 --- a/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go +++ /dev/null @@ -1,452 +0,0 @@ -// Package buildinfo implements utilities for build information. 
-// -// Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md -package buildinfo - -import ( - "context" - "encoding/base64" - "encoding/json" - "sort" - "strings" - - ctnref "github.com/containerd/containerd/reference" - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/exporter/containerimage/exptypes" - "github.com/moby/buildkit/solver/llbsolver/provenance" - "github.com/moby/buildkit/source" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" - "github.com/moby/buildkit/util/urlutil" - "github.com/pkg/errors" -) - -func FromProvenance(c *provenance.Capture) (*binfotypes.BuildInfo, error) { - var bi binfotypes.BuildInfo - - bi.Frontend = c.Frontend - bi.Attrs = map[string]*string{} - for k, v := range c.Args { - v := v - bi.Attrs[k] = &v - } - - for _, s := range c.Sources.Images { - bi.Sources = append(bi.Sources, binfotypes.Source{ - Type: binfotypes.SourceTypeDockerImage, - Ref: s.Ref, - Pin: s.Digest.String(), - }) - } - - for _, s := range c.Sources.HTTP { - bi.Sources = append(bi.Sources, binfotypes.Source{ - Type: binfotypes.SourceTypeHTTP, - Ref: s.URL, - Pin: s.Digest.String(), - }) - } - - for _, s := range c.Sources.Git { - bi.Sources = append(bi.Sources, binfotypes.Source{ - Type: binfotypes.SourceTypeGit, - Ref: s.URL, - Pin: s.Commit, - }) - } - - sort.Slice(bi.Sources, func(i, j int) bool { - return bi.Sources[i].Ref < bi.Sources[j].Ref - }) - - return &bi, nil -} - -func AddMetadata(metadata map[string][]byte, key string, c *provenance.Capture) error { - bi, err := FromProvenance(c) - if err != nil { - return err - } - dt, err := json.Marshal(bi) - if err != nil { - return err - } - metadata[key] = dt - return nil -} - -// Decode decodes a base64 encoded build info. 
-func Decode(enc string) (bi binfotypes.BuildInfo, _ error) { - dec, err := base64.StdEncoding.DecodeString(enc) - if err != nil { - return bi, err - } - err = json.Unmarshal(dec, &bi) - return bi, err -} - -// Encode encodes build info. -func Encode(ctx context.Context, metadata map[string][]byte, key string, llbSources map[string]string) ([]byte, error) { - var bi binfotypes.BuildInfo - if metadata == nil { - metadata = make(map[string][]byte) - } - if v, ok := metadata[key]; ok && v != nil { - if err := json.Unmarshal(v, &bi); err != nil { - return nil, err - } - } - if sources, err := mergeSources(llbSources, bi.Sources); err == nil { - bi.Sources = sources - } else { - return nil, err - } - bi.Sources = dedupSources(bi.Sources, allDepsSources(bi.Deps, nil)) - return json.Marshal(bi) -} - -// mergeSources combines and fixes build sources from frontend sources. -func mergeSources(llbSources map[string]string, frontendSources []binfotypes.Source) ([]binfotypes.Source, error) { - if llbSources == nil { - llbSources = make(map[string]string) - } - // iterate and combine build sources - mbs := map[string]binfotypes.Source{} - for llbSource, pin := range llbSources { - src, err := source.FromString(llbSource) - if err != nil { - return nil, err - } - switch sourceID := src.(type) { - case *source.ImageIdentifier: - for i, fsrc := range frontendSources { - if fsrc.Type != binfotypes.SourceTypeDockerImage { - continue - } - // use original user input from frontend sources - if fsrc.Alias == sourceID.Reference.String() || fsrc.Pin == pin { - if fsrc.Alias == "" { - fsrc.Alias = sourceID.Reference.String() - } - parsed, err := reference.ParseNormalizedNamed(fsrc.Ref) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse %s", fsrc.Ref) - } - mbs[fsrc.Alias] = binfotypes.Source{ - Type: binfotypes.SourceTypeDockerImage, - Ref: reference.TagNameOnly(parsed).String(), - Pin: pin, - } - frontendSources = append(frontendSources[:i], frontendSources[i+1:]...) 
- break - } - } - if _, ok := mbs[sourceID.Reference.String()]; !ok { - mbs[sourceID.Reference.String()] = binfotypes.Source{ - Type: binfotypes.SourceTypeDockerImage, - Ref: sourceID.Reference.String(), - Pin: pin, - } - } - case *source.GitIdentifier: - sref := sourceID.Remote - if len(sourceID.Ref) > 0 { - sref += "#" + sourceID.Ref - } - if len(sourceID.Subdir) > 0 { - sref += ":" + sourceID.Subdir - } - if _, ok := mbs[sref]; !ok { - mbs[sref] = binfotypes.Source{ - Type: binfotypes.SourceTypeGit, - Ref: urlutil.RedactCredentials(sref), - Pin: pin, - } - } - case *source.HTTPIdentifier: - if _, ok := mbs[sourceID.URL]; !ok { - mbs[sourceID.URL] = binfotypes.Source{ - Type: binfotypes.SourceTypeHTTP, - Ref: urlutil.RedactCredentials(sourceID.URL), - Pin: pin, - } - } - } - } - - // leftover sources in frontend. Mostly duplicated ones we don't need but - // there is an edge case if no instruction except sources one is defined - // (e.g. FROM ...) that can be valid so take it into account. - for _, fsrc := range frontendSources { - if fsrc.Type != binfotypes.SourceTypeDockerImage { - continue - } - if _, ok := mbs[fsrc.Alias]; !ok { - parsed, err := reference.ParseNormalizedNamed(fsrc.Ref) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse %s", fsrc.Ref) - } - mbs[fsrc.Alias] = binfotypes.Source{ - Type: binfotypes.SourceTypeDockerImage, - Ref: reference.TagNameOnly(parsed).String(), - Pin: fsrc.Pin, - } - } - } - - srcs := make([]binfotypes.Source, 0, len(mbs)) - for _, bs := range mbs { - srcs = append(srcs, bs) - } - sort.Slice(srcs, func(i, j int) bool { - return srcs[i].Ref < srcs[j].Ref - }) - - return srcs, nil -} - -// decodeDeps decodes dependencies (buildinfo) added via the input context. 
-func decodeDeps(key string, attrs map[string]*string) (map[string]binfotypes.BuildInfo, error) { - var platform string - // extract platform from metadata key - if skey := strings.SplitN(key, "/", 2); len(skey) == 2 { - platform = skey[1] - } - - res := make(map[string]binfotypes.BuildInfo) - for k, v := range attrs { - // dependencies are only handled via the input context - if v == nil || !strings.HasPrefix(k, "input-metadata:") { - continue - } - - // if platform is defined in the key, only decode dependencies - // for that platform and vice versa - hasPlatform := len(strings.SplitN(k, "::", 2)) == 2 - if (platform != "" && !hasPlatform) || (platform == "" && hasPlatform) { - continue - } - - // decode input metadata - var inputresp map[string]string - if err := json.Unmarshal([]byte(*v), &inputresp); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal input-metadata") - } - - // check buildinfo key is present - if _, ok := inputresp[exptypes.ExporterBuildInfo]; !ok { - continue - } - - // decode buildinfo - bi, err := Decode(inputresp[exptypes.ExporterBuildInfo]) - if err != nil { - return nil, errors.Wrap(err, "failed to decode buildinfo from input-metadata") - } - - // set dep key - var depkey string - kl := strings.SplitN(k, ":", 2) - if len(kl) != 2 { - continue - } - depkey = strings.SplitN(kl[1], "::", 2)[0] - if platform != "" { - depkey = strings.TrimSuffix(depkey, "::"+platform) - } - - res[depkey] = bi - } - if len(res) == 0 { - return nil, nil - } - return res, nil -} - -// dedupSources deduplicates regular sources from dependencies ones. 
-func dedupSources(sources []binfotypes.Source, depsSources []binfotypes.Source) (srcs []binfotypes.Source) { - // dedup sources from deps - msrc := make(map[binfotypes.Source]struct{}) -sourcesloop: - for _, src := range sources { - for _, srcd := range depsSources { - if src == srcd { - continue sourcesloop - } - if src.Type == binfotypes.SourceTypeDockerImage && srcd.Type == binfotypes.SourceTypeDockerImage { - _, dgst := ctnref.SplitObject(src.Ref) - if dgst != "" && src.Pin == srcd.Pin { - continue sourcesloop - } - } - } - if _, ok := msrc[src]; !ok { - msrc[src] = struct{}{} - } - } - for src := range msrc { - srcs = append(srcs, src) - } - sort.Slice(srcs, func(i, j int) bool { - return srcs[i].Ref < srcs[j].Ref - }) - return srcs -} - -// allDepsSources gathers dependencies sources. -func allDepsSources(deps map[string]binfotypes.BuildInfo, visited map[binfotypes.Source]struct{}) (res []binfotypes.Source) { - if visited == nil { - visited = make(map[binfotypes.Source]struct{}) - } - if len(deps) == 0 { - return res - } - for _, dbi := range deps { - for _, dsrc := range dbi.Sources { - if _, ok := visited[dsrc]; ok { - continue - } - visited[dsrc] = struct{}{} - } - res = allDepsSources(dbi.Deps, visited) - } - for src := range visited { - res = append(res, src) - } - return res -} - -// FormatOpts holds build info format options. -type FormatOpts struct { - RemoveAttrs bool -} - -// Format formats build info. 
-func Format(dt []byte, opts FormatOpts) (_ []byte, err error) { - if len(dt) == 0 { - return dt, nil - } - - var bi binfotypes.BuildInfo - if err := json.Unmarshal(dt, &bi); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal buildinfo for formatting") - } - - if opts.RemoveAttrs { - bi.Attrs = nil - if len(bi.Deps) > 0 { - bi.Sources = dedupSources(append(bi.Sources, allDepsSources(bi.Deps, nil)...), nil) - bi.Deps = nil - } - } - - if dt, err = json.Marshal(bi); err != nil { - return nil, err - } - return dt, nil -} - -var knownAttrs = []string{ - //"cmdline", - "context", - "filename", - "source", - - //"add-hosts", - //"cgroup-parent", - //"force-network-mode", - //"hostname", - //"image-resolve-mode", - //"platform", - "shm-size", - "target", - "ulimit", -} - -// filterAttrs filters frontent opt by picking only those that -// could effectively change the build result. -func filterAttrs(key string, attrs map[string]*string) map[string]*string { - var platform string - // extract platform from metadata key - skey := strings.SplitN(key, "/", 2) - if len(skey) == 2 { - platform = skey[1] - } - filtered := make(map[string]*string) - for k, v := range attrs { - if v == nil { - continue - } - // control args are filtered out - if isControlArg(k) { - continue - } - // always include - if strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "vcs:") { - filtered[k] = v - continue - } - // input context key and value has to be cleaned up - // before being included - if strings.HasPrefix(k, "context:") { - ctxkey := strings.SplitN(k, "::", 2) - hasCtxPlatform := len(ctxkey) == 2 - // if platform is set and also defined in key, set context - // for the right one. 
- if hasCtxPlatform && platform != "" && platform != ctxkey[1] { - continue - } - if platform == "" && hasCtxPlatform { - ctxval := strings.TrimSuffix(*v, "::"+ctxkey[1]) - filtered[strings.TrimSuffix(k, "::"+ctxkey[1])] = &ctxval - continue - } - ctxival := strings.TrimSuffix(*v, "::"+platform) - filtered[strings.TrimSuffix(k, "::"+platform)] = &ctxival - continue - } - // filter only for known attributes - for _, knownAttr := range knownAttrs { - if knownAttr == k { - filtered[k] = v - break - } - } - } - return filtered -} - -var knownControlArgs = []string{ - "BUILDKIT_CACHE_MOUNT_NS", - "BUILDKIT_CONTEXT_KEEP_GIT_DIR", - "BUILDKIT_BUILDINFO", - "BUILDKIT_INLINE_BUILDINFO_ATTRS", - "BUILDKIT_INLINE_CACHE", - "BUILDKIT_MULTI_PLATFORM", - "BUILDKIT_SANDBOX_HOSTNAME", - "BUILDKIT_SYNTAX", -} - -// isControlArg checks if a build attributes is a control arg -func isControlArg(attrKey string) bool { - for _, k := range knownControlArgs { - if strings.HasPrefix(attrKey, "build-arg:"+k) { - return true - } - } - return false -} - -func reduceMapString(m1 map[string]string, m2 map[string]*string) map[string]string { - if m1 == nil && m2 == nil { - return nil - } - if m1 == nil { - m1 = map[string]string{} - } - for k, v := range m2 { - if v != nil { - m1[k] = *v - } - } - return m1 -} diff --git a/vendor/github.com/moby/buildkit/util/buildinfo/types/types.go b/vendor/github.com/moby/buildkit/util/buildinfo/types/types.go deleted file mode 100644 index 06cf09681e..0000000000 --- a/vendor/github.com/moby/buildkit/util/buildinfo/types/types.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package binfotypes implements types for build information. -// -// Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md -package binfotypes - -import ( - srctypes "github.com/moby/buildkit/source/types" -) - -// ImageConfigField defines the key of build dependencies. 
-const ImageConfigField = "moby.buildkit.buildinfo.v1" - -// ImageConfig defines the structure of build dependencies -// inside image config. -type ImageConfig struct { - BuildInfo string `json:"moby.buildkit.buildinfo.v1,omitempty"` -} - -// BuildInfo defines the main structure added to image config as -// ImageConfigField key and returned in solver ExporterResponse as -// exptypes.ExporterBuildInfo key. -type BuildInfo struct { - // Frontend defines the frontend used to build. - Frontend string `json:"frontend,omitempty"` - // Attrs defines build request attributes. - Attrs map[string]*string `json:"attrs,omitempty"` - // Sources defines build dependencies. - Sources []Source `json:"sources,omitempty"` - // Deps defines context dependencies. - Deps map[string]BuildInfo `json:"deps,omitempty"` -} - -// Source defines a build dependency. -type Source struct { - // Type defines the SourceType source type (docker-image, git, http). - Type SourceType `json:"type,omitempty"` - // Ref is the reference of the source. - Ref string `json:"ref,omitempty"` - // Alias is a special field used to match with the actual source ref - // because frontend might have already transformed a string user typed - // before generating LLB. - Alias string `json:"alias,omitempty"` - // Pin is the source digest. - Pin string `json:"pin,omitempty"` -} - -// SourceType contains source type. -type SourceType string - -// List of source types. 
-const ( - SourceTypeDockerImage SourceType = srctypes.DockerImageScheme - SourceTypeGit SourceType = srctypes.GitScheme - SourceTypeHTTP SourceType = srctypes.HTTPScheme -) diff --git a/vendor/github.com/moby/buildkit/util/compression/attrs.go b/vendor/github.com/moby/buildkit/util/compression/attrs.go new file mode 100644 index 0000000000..1f986d3712 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/compression/attrs.go @@ -0,0 +1,48 @@ +package compression + +import ( + "strconv" + + "github.com/pkg/errors" +) + +const ( + attrLayerCompression = "compression" + attrForceCompression = "force-compression" + attrCompressionLevel = "compression-level" +) + +func ParseAttributes(attrs map[string]string) (Config, error) { + var compressionType Type + if v, ok := attrs[attrLayerCompression]; ok { + c, err := Parse(v) + if err != nil { + return Config{}, err + } + compressionType = c + } else { + compressionType = Default + } + compressionConfig := New(compressionType) + if v, ok := attrs[attrForceCompression]; ok { + var force bool + if v == "" { + force = true + } else { + b, err := strconv.ParseBool(v) + if err != nil { + return Config{}, errors.Wrapf(err, "non-bool value %s specified for %s", v, attrForceCompression) + } + force = b + } + compressionConfig = compressionConfig.SetForce(force) + } + if v, ok := attrs[attrCompressionLevel]; ok { + ii, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return Config{}, errors.Wrapf(err, "non-integer value %s specified for %s", v, attrCompressionLevel) + } + compressionConfig = compressionConfig.SetLevel(int(ii)) + } + return compressionConfig, nil +} diff --git a/vendor/github.com/moby/buildkit/util/compression/compression.go b/vendor/github.com/moby/buildkit/util/compression/compression.go index cfc26b9078..8398bfb299 100644 --- a/vendor/github.com/moby/buildkit/util/compression/compression.go +++ b/vendor/github.com/moby/buildkit/util/compression/compression.go @@ -9,11 +9,11 @@ import ( 
"github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/stargz-snapshotter/estargz" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/iohelper" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type Compressor func(dest io.Writer, mediaType string) (io.WriteCloser, error) @@ -78,10 +78,9 @@ func (c Config) SetLevel(l int) Config { const ( mediaTypeDockerSchema2LayerZstd = images.MediaTypeDockerSchema2Layer + ".zstd" - mediaTypeImageLayerZstd = ocispecs.MediaTypeImageLayer + "+zstd" // unreleased image-spec#790 ) -var Default gzipType = Gzip +var Default = Gzip func parse(t string) (Type, error) { switch t { @@ -100,11 +99,11 @@ func parse(t string) (Type, error) { func fromMediaType(mediaType string) (Type, error) { switch toOCILayerType[mediaType] { - case ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerNonDistributable: + case ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. return Uncompressed, nil - case ocispecs.MediaTypeImageLayerGzip, ocispecs.MediaTypeImageLayerNonDistributableGzip: + case ocispecs.MediaTypeImageLayerGzip, ocispecs.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. return Gzip, nil - case mediaTypeImageLayerZstd, ocispecs.MediaTypeImageLayerNonDistributableZstd: + case ocispecs.MediaTypeImageLayerZstd, ocispecs.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. 
return Zstd, nil default: return nil, errors.Errorf("unsupported media type %s", mediaType) @@ -191,27 +190,27 @@ var toDockerLayerType = map[string]string{ images.MediaTypeDockerSchema2LayerGzip: images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerForeign: images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip: images.MediaTypeDockerSchema2LayerForeignGzip, - ocispecs.MediaTypeImageLayerNonDistributable: images.MediaTypeDockerSchema2LayerForeign, - ocispecs.MediaTypeImageLayerNonDistributableGzip: images.MediaTypeDockerSchema2LayerForeignGzip, - mediaTypeImageLayerZstd: mediaTypeDockerSchema2LayerZstd, + ocispecs.MediaTypeImageLayerNonDistributable: images.MediaTypeDockerSchema2LayerForeign, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. + ocispecs.MediaTypeImageLayerNonDistributableGzip: images.MediaTypeDockerSchema2LayerForeignGzip, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. + ocispecs.MediaTypeImageLayerZstd: mediaTypeDockerSchema2LayerZstd, mediaTypeDockerSchema2LayerZstd: mediaTypeDockerSchema2LayerZstd, } var toOCILayerType = map[string]string{ ocispecs.MediaTypeImageLayer: ocispecs.MediaTypeImageLayer, - ocispecs.MediaTypeImageLayerNonDistributable: ocispecs.MediaTypeImageLayerNonDistributable, - ocispecs.MediaTypeImageLayerNonDistributableGzip: ocispecs.MediaTypeImageLayerNonDistributableGzip, - ocispecs.MediaTypeImageLayerNonDistributableZstd: ocispecs.MediaTypeImageLayerNonDistributableZstd, + ocispecs.MediaTypeImageLayerNonDistributable: ocispecs.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. 
+ ocispecs.MediaTypeImageLayerNonDistributableGzip: ocispecs.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. + ocispecs.MediaTypeImageLayerNonDistributableZstd: ocispecs.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. images.MediaTypeDockerSchema2Layer: ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip: ocispecs.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip: ocispecs.MediaTypeImageLayerGzip, - images.MediaTypeDockerSchema2LayerForeign: ocispecs.MediaTypeImageLayerNonDistributable, - images.MediaTypeDockerSchema2LayerForeignGzip: ocispecs.MediaTypeImageLayerNonDistributableGzip, - mediaTypeImageLayerZstd: mediaTypeImageLayerZstd, - mediaTypeDockerSchema2LayerZstd: mediaTypeImageLayerZstd, + images.MediaTypeDockerSchema2LayerForeign: ocispecs.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. + images.MediaTypeDockerSchema2LayerForeignGzip: ocispecs.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. 
+ ocispecs.MediaTypeImageLayerZstd: ocispecs.MediaTypeImageLayerZstd, + mediaTypeDockerSchema2LayerZstd: ocispecs.MediaTypeImageLayerZstd, } -func convertLayerMediaType(mediaType string, oci bool) string { +func convertLayerMediaType(ctx context.Context, mediaType string, oci bool) string { var converted string if oci { converted = toOCILayerType[mediaType] @@ -219,16 +218,16 @@ func convertLayerMediaType(mediaType string, oci bool) string { converted = toDockerLayerType[mediaType] } if converted == "" { - logrus.Warnf("unhandled conversion for mediatype %q", mediaType) + bklog.G(ctx).Warnf("unhandled conversion for mediatype %q", mediaType) return mediaType } return converted } -func ConvertAllLayerMediaTypes(oci bool, descs ...ocispecs.Descriptor) []ocispecs.Descriptor { +func ConvertAllLayerMediaTypes(ctx context.Context, oci bool, descs ...ocispecs.Descriptor) []ocispecs.Descriptor { var converted []ocispecs.Descriptor for _, desc := range descs { - desc.MediaType = convertLayerMediaType(desc.MediaType, oci) + desc.MediaType = convertLayerMediaType(ctx, desc.MediaType, oci) converted = append(converted, desc) } return converted diff --git a/vendor/github.com/moby/buildkit/util/compression/zstd.go b/vendor/github.com/moby/buildkit/util/compression/zstd.go index f18872199f..e7de6a21c3 100644 --- a/vendor/github.com/moby/buildkit/util/compression/zstd.go +++ b/vendor/github.com/moby/buildkit/util/compression/zstd.go @@ -47,7 +47,7 @@ func (c zstdType) NeedsForceCompression() bool { } func (c zstdType) MediaType() string { - return mediaTypeImageLayerZstd + return ocispecs.MediaTypeImageLayerZstd } func (c zstdType) String() string { diff --git a/vendor/github.com/moby/buildkit/util/contentutil/copy.go b/vendor/github.com/moby/buildkit/util/contentutil/copy.go index 5039bd0c20..22ef70c12f 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/copy.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/copy.go @@ -15,6 +15,7 @@ import ( ) func Copy(ctx 
context.Context, ingester content.Ingester, provider content.Provider, desc ocispecs.Descriptor, ref string, logger func([]byte)) error { + ctx = RegisterContentPayloadTypes(ctx) if _, err := retryhandler.New(limited.FetchHandler(ingester, &localFetcher{provider}, ref), logger)(ctx, desc); err != nil { return err } @@ -60,6 +61,7 @@ func (r *rc) Seek(offset int64, whence int) (int64, error) { } func CopyChain(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispecs.Descriptor) error { + ctx = RegisterContentPayloadTypes(ctx) var m sync.Mutex manifestStack := []ocispecs.Descriptor{} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/refs.go b/vendor/github.com/moby/buildkit/util/contentutil/refs.go index 16fb9aafa5..d7d0b5bbe9 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/refs.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/refs.go @@ -20,7 +20,6 @@ func ProviderFromRef(ref string) (ocispecs.Descriptor, content.Provider, error) headers := http.Header{} headers.Set("User-Agent", version.UserAgent()) remote := docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, Headers: headers, }) @@ -40,7 +39,6 @@ func IngesterFromRef(ref string) (content.Ingester, error) { headers := http.Header{} headers.Set("User-Agent", version.UserAgent()) remote := docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, Headers: headers, }) diff --git a/vendor/github.com/moby/buildkit/util/contentutil/types.go b/vendor/github.com/moby/buildkit/util/contentutil/types.go new file mode 100644 index 0000000000..19dfb65408 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/contentutil/types.go @@ -0,0 +1,15 @@ +package contentutil + +import ( + "context" + + "github.com/containerd/containerd/remotes" + intoto "github.com/in-toto/in-toto-golang/in_toto" +) + +// RegisterContentPayloadTypes registers content types that are not defined by +// default but that we expect to find in registry 
images. +func RegisterContentPayloadTypes(ctx context.Context) context.Context { + ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto") + return ctx +} diff --git a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go index c53a24b865..9ab9398013 100644 --- a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go +++ b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go @@ -10,16 +10,16 @@ import ( "github.com/containerd/containerd/oci" "github.com/containerd/containerd/pkg/cap" "github.com/containerd/containerd/pkg/userns" + "github.com/moby/buildkit/util/bklog" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) // WithInsecureSpec sets spec with All capability. func WithInsecureSpec() oci.SpecOpts { - return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { - addCaps, err := getAllCaps() + return func(ctx context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + addCaps, err := getAllCaps(ctx) if err != nil { return err } @@ -96,7 +96,7 @@ func WithInsecureSpec() oci.SpecOpts { loopID, err := getFreeLoopID() if err != nil { - logrus.Debugf("failed to get next free loop device: %v", err) + bklog.G(ctx).Debugf("failed to get next free loop device: %v", err) } for i := 0; i <= loopID+7; i++ { @@ -142,7 +142,7 @@ func getCurrentCaps() ([]string, error) { return currentCaps, currentCapsError } -func getAllCaps() ([]string, error) { +func getAllCaps(ctx context.Context) ([]string, error) { availableCaps, err := getCurrentCaps() if err != nil { return nil, errors.Errorf("error getting current capabilities: %s", err) @@ -152,7 +152,7 @@ func getAllCaps() ([]string, error) { // they are either not supported by the kernel or dropped at the process level for _, cap 
:= range availableCaps { if _, exists := linux35Caps[cap]; !exists { - logrus.Warnf("capability %s could not be granted for insecure mode", cap) + bklog.G(ctx).Warnf("capability %s could not be granted for insecure mode", cap) } } diff --git a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go index 3c1b673e15..82ed25205f 100644 --- a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go +++ b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go @@ -25,13 +25,13 @@ type contextKeyT string var contextKey = contextKeyT("buildkit/util/flightcontrol.progress") // Group is a flightcontrol synchronization group -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // lazily initialized +type Group[T any] struct { + mu sync.Mutex // protects m + m map[string]*call[T] // lazily initialized } // Do executes a context function syncronized by the key -func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (v interface{}, err error) { +func (g *Group[T]) Do(ctx context.Context, key string, fn func(ctx context.Context) (T, error)) (v T, err error) { var backoff time.Duration for { v, err = g.do(ctx, key, fn) @@ -53,10 +53,10 @@ func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context) } } -func (g *Group) do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (interface{}, error) { +func (g *Group[T]) do(ctx context.Context, key string, fn func(ctx context.Context) (T, error)) (T, error) { g.mu.Lock() if g.m == nil { - g.m = make(map[string]*call) + g.m = make(map[string]*call[T]) } if c, ok := g.m[key]; ok { // register 2nd waiter @@ -78,16 +78,16 @@ func (g *Group) do(ctx context.Context, key string, fn func(ctx context.Context) return c.wait(ctx) } -type call struct { +type call[T any] struct { mu sync.Mutex - result interface{} + result T 
err error ready chan struct{} cleaned chan struct{} - ctx *sharedContext + ctx *sharedContext[T] ctxs []context.Context - fn func(ctx context.Context) (interface{}, error) + fn func(ctx context.Context) (T, error) once sync.Once closeProgressWriter func() @@ -95,8 +95,8 @@ type call struct { progressCtx context.Context } -func newCall(fn func(ctx context.Context) (interface{}, error)) *call { - c := &call{ +func newCall[T any](fn func(ctx context.Context) (T, error)) *call[T] { + c := &call[T]{ fn: fn, ready: make(chan struct{}), cleaned: make(chan struct{}), @@ -114,7 +114,7 @@ func newCall(fn func(ctx context.Context) (interface{}, error)) *call { return c } -func (c *call) run() { +func (c *call[T]) run() { defer c.closeProgressWriter() ctx, cancel := context.WithCancel(c.ctx) defer cancel() @@ -126,7 +126,8 @@ func (c *call) run() { close(c.ready) } -func (c *call) wait(ctx context.Context) (v interface{}, err error) { +func (c *call[T]) wait(ctx context.Context) (v T, err error) { + var empty T c.mu.Lock() // detect case where caller has just returned, let it clean up before select { @@ -134,7 +135,7 @@ func (c *call) wait(ctx context.Context) (v interface{}, err error) { c.mu.Unlock() if c.err != nil { // on error retry <-c.cleaned - return nil, errRetry + return empty, errRetry } pw, ok, _ := progress.NewFromContext(ctx) if ok { @@ -145,7 +146,7 @@ func (c *call) wait(ctx context.Context) (v interface{}, err error) { case <-c.ctx.done: // could return if no error c.mu.Unlock() <-c.cleaned - return nil, errRetry + return empty, errRetry default: } @@ -174,13 +175,13 @@ func (c *call) wait(ctx context.Context) (v interface{}, err error) { if ok { c.progressState.close(pw) } - return nil, ctx.Err() + return empty, ctx.Err() case <-c.ready: return c.result, c.err // shared not implemented yet } } -func (c *call) Deadline() (deadline time.Time, ok bool) { +func (c *call[T]) Deadline() (deadline time.Time, ok bool) { c.mu.Lock() defer c.mu.Unlock() for _, ctx := 
range c.ctxs { @@ -196,11 +197,11 @@ func (c *call) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } -func (c *call) Done() <-chan struct{} { +func (c *call[T]) Done() <-chan struct{} { return c.ctx.done } -func (c *call) Err() error { +func (c *call[T]) Err() error { select { case <-c.ctx.Done(): return c.ctx.err @@ -209,7 +210,7 @@ func (c *call) Err() error { } } -func (c *call) Value(key interface{}) interface{} { +func (c *call[T]) Value(key interface{}) interface{} { if key == contextKey { return c.progressState } @@ -239,17 +240,17 @@ func (c *call) Value(key interface{}) interface{} { return nil } -type sharedContext struct { - *call +type sharedContext[T any] struct { + *call[T] done chan struct{} err error } -func newContext(c *call) *sharedContext { - return &sharedContext{call: c, done: make(chan struct{})} +func newContext[T any](c *call[T]) *sharedContext[T] { + return &sharedContext[T]{call: c, done: make(chan struct{})} } -func (sc *sharedContext) checkDone() bool { +func (sc *sharedContext[T]) checkDone() bool { sc.mu.Lock() select { case <-sc.done: diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go index 6cd9fae98e..710bc1ec8b 100644 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go +++ b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go @@ -1,16 +1,17 @@ package grpcerrors import ( + "context" "encoding/json" "errors" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" rpc "github.com/gogo/googleapis/google/rpc" gogotypes "github.com/gogo/protobuf/types" "github.com/golang/protobuf/proto" //nolint:staticcheck "github.com/golang/protobuf/ptypes/any" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/stack" - "github.com/sirupsen/logrus" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -25,7 +26,7 @@ 
type TypedErrorProto interface { WrapError(error) error } -func ToGRPC(err error) error { +func ToGRPC(ctx context.Context, err error) error { if err == nil { return nil } @@ -64,7 +65,7 @@ func ToGRPC(err error) error { }) if len(details) > 0 { - if st2, err := withDetails(st, details...); err == nil { + if st2, err := withDetails(ctx, st, details...); err == nil { st = st2 } } @@ -72,7 +73,7 @@ func ToGRPC(err error) error { return st.Err() } -func withDetails(s *status.Status, details ...proto.Message) (*status.Status, error) { +func withDetails(ctx context.Context, s *status.Status, details ...proto.Message) (*status.Status, error) { if s.Code() == codes.OK { return nil, errors.New("no error details for status with code OK") } @@ -80,7 +81,7 @@ func withDetails(s *status.Status, details ...proto.Message) (*status.Status, er for _, detail := range details { url, err := typeurl.TypeURL(detail) if err != nil { - logrus.Warnf("ignoring typed error %T: not registered", detail) + bklog.G(ctx).Warnf("ignoring typed error %T: not registered", detail) continue } dt, err := json.Marshal(detail) diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go b/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go index 1c17e4c67d..a592078910 100644 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go +++ b/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go @@ -15,7 +15,7 @@ func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.Una oldErr := err if err != nil { stack.Helper() - err = ToGRPC(err) + err = ToGRPC(ctx, err) } if oldErr != nil && err == nil { logErr := errors.Wrap(err, "invalid grpc error conversion") @@ -30,7 +30,7 @@ func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.Una } func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - err := ToGRPC(handler(srv, ss)) + err := ToGRPC(ss.Context(), 
handler(srv, ss)) if err != nil { stack.Helper() } @@ -50,5 +50,5 @@ func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grp if err != nil { stack.Helper() } - return s, ToGRPC(err) + return s, ToGRPC(ctx, err) } diff --git a/vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go b/vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go deleted file mode 100644 index 7196453c33..0000000000 --- a/vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -package imageutil - -import ( - "encoding/base64" - "encoding/json" - - binfotypes "github.com/moby/buildkit/util/buildinfo/types" - "github.com/pkg/errors" -) - -// BuildInfo returns build info from image config. -// -// Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md -func BuildInfo(dt []byte) (*binfotypes.BuildInfo, error) { - if len(dt) == 0 { - return nil, nil - } - var config binfotypes.ImageConfig - if err := json.Unmarshal(dt, &config); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal image config") - } - if len(config.BuildInfo) == 0 { - return nil, nil - } - dtbi, err := base64.StdEncoding.DecodeString(config.BuildInfo) - if err != nil { - return nil, err - } - var bi binfotypes.BuildInfo - if err = json.Unmarshal(dtbi, &bi); err != nil { - return nil, errors.Wrap(err, "failed to decode buildinfo from image config") - } - return &bi, nil -} diff --git a/vendor/github.com/moby/buildkit/util/imageutil/config.go b/vendor/github.com/moby/buildkit/util/imageutil/config.go index 76e0a5da35..f183db5872 100644 --- a/vendor/github.com/moby/buildkit/util/imageutil/config.go +++ b/vendor/github.com/moby/buildkit/util/imageutil/config.go @@ -3,6 +3,8 @@ package imageutil import ( "context" "encoding/json" + "fmt" + "strings" "sync" "time" @@ -13,7 +15,11 @@ import ( "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" 
"github.com/containerd/containerd/remotes/docker" - "github.com/moby/buildkit/util/attestation" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/solver/pb" + srctypes "github.com/moby/buildkit/source/types" + "github.com/moby/buildkit/sourcepolicy" + spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/resolver/limited" @@ -47,7 +53,17 @@ func AddLease(f func(context.Context) error) { leasesMu.Unlock() } -func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *ocispecs.Platform) (digest.Digest, []byte, error) { +// ResolveToNonImageError is returned by the resolver when the ref is mutated by policy to a non-image ref +type ResolveToNonImageError struct { + Ref string + Updated string +} + +func (e ResolveToNonImageError) Error() string { + return fmt.Sprintf("ref mutated by policy to non-image: %s://%s -> %s", srctypes.DockerImageScheme, e.Ref, e.Updated) +} + +func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *ocispecs.Platform, spls []*spb.Policy) (string, digest.Digest, []byte, error) { // TODO: fix buildkit to take interface instead of struct var platform platforms.MatchComparer if p != nil { @@ -57,13 +73,44 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co } ref, err := reference.Parse(str) if err != nil { - return "", nil, errors.WithStack(err) + return "", "", nil, errors.WithStack(err) + } + + op := &pb.Op{ + Op: &pb.Op_Source{ + Source: &pb.SourceOp{ + Identifier: srctypes.DockerImageScheme + "://" + ref.String(), + }, + }, + } + + mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op) + if err != nil { + return "", "", nil, errors.Wrap(err, "could not resolve image due to policy") + } + + if mut { + var ( + t string + ok bool + ) + t, 
newRef, ok := strings.Cut(op.GetSource().GetIdentifier(), "://") + if !ok { + return "", "", nil, errors.Errorf("could not parse ref: %s", op.GetSource().GetIdentifier()) + } + if ok && t != srctypes.DockerImageScheme { + return "", "", nil, &ResolveToNonImageError{Ref: str, Updated: newRef} + } + ref, err = reference.Parse(newRef) + if err != nil { + return "", "", nil, errors.WithStack(err) + } } if leaseManager != nil { ctx2, done, err := leaseutil.WithLease(ctx, leaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) if err != nil { - return "", nil, errors.WithStack(err) + return "", "", nil, errors.WithStack(err) } ctx = ctx2 defer func() { @@ -94,24 +141,25 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co if desc.MediaType == "" { _, desc, err = resolver.Resolve(ctx, ref.String()) if err != nil { - return "", nil, err + return "", "", nil, err } } fetcher, err := resolver.Fetcher(ctx, ref.String()) if err != nil { - return "", nil, err + return "", "", nil, err } if desc.MediaType == images.MediaTypeDockerSchema1Manifest { - return readSchema1Config(ctx, ref.String(), desc, fetcher, cache) + dgst, dt, err := readSchema1Config(ctx, ref.String(), desc, fetcher, cache) + return ref.String(), dgst, dt, err } children := childrenConfigHandler(cache, platform) dslHandler, err := docker.AppendDistributionSourceLabel(cache, ref.String()) if err != nil { - return "", nil, err + return "", "", nil, err } handlers := []images.Handler{ @@ -120,19 +168,19 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co children, } if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil { - return "", nil, err + return "", "", nil, err } config, err := images.Config(ctx, cache, desc, platform) if err != nil { - return "", nil, err + return "", "", nil, err } dt, err := content.ReadBlob(ctx, cache, config) if err != nil { - return "", nil, err + return "", "", nil, err } 
- return desc.Digest, dt, nil + return ref.String(), desc.Digest, dt, nil } func childrenConfigHandler(provider content.Provider, platform platforms.MatchComparer) images.HandlerFunc { @@ -174,7 +222,7 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo descs = append(descs, index.Manifests...) } case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType, - attestation.MediaTypeDockerSchema2AttestationType: + intoto.PayloadType: // childless data types. return nil, nil default: diff --git a/vendor/github.com/moby/buildkit/util/leaseutil/manager.go b/vendor/github.com/moby/buildkit/util/leaseutil/manager.go index 45a35273a5..a02fb9613c 100644 --- a/vendor/github.com/moby/buildkit/util/leaseutil/manager.go +++ b/vendor/github.com/moby/buildkit/util/leaseutil/manager.go @@ -35,41 +35,49 @@ func MakeTemporary(l *leases.Lease) error { return nil } -func WithNamespace(lm leases.Manager, ns string) leases.Manager { - return &nsLM{manager: lm, ns: ns} +func WithNamespace(lm leases.Manager, ns string) *Manager { + return &Manager{manager: lm, ns: ns} } -type nsLM struct { +type Manager struct { manager leases.Manager ns string } -func (l *nsLM) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) { +func (l *Manager) Namespace() string { + return l.ns +} + +func (l *Manager) WithNamespace(ns string) *Manager { + return WithNamespace(l.manager, ns) +} + +func (l *Manager) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.Create(ctx, opts...) } -func (l *nsLM) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error { +func (l *Manager) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.Delete(ctx, lease, opts...) 
} -func (l *nsLM) List(ctx context.Context, filters ...string) ([]leases.Lease, error) { +func (l *Manager) List(ctx context.Context, filters ...string) ([]leases.Lease, error) { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.List(ctx, filters...) } -func (l *nsLM) AddResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error { +func (l *Manager) AddResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.AddResource(ctx, lease, resource) } -func (l *nsLM) DeleteResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error { +func (l *Manager) DeleteResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.DeleteResource(ctx, lease, resource) } -func (l *nsLM) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { +func (l *Manager) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) { ctx = namespaces.WithNamespace(ctx, l.ns) return l.manager.ListResources(ctx, lease) } diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s b/vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go index 2bebfd638e..2d37aa94a1 100644 --- a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go @@ -242,28 +242,56 @@ func (c *cniProvider) newNS(ctx context.Context, hostname string) (*cniNS, error cni.WithArgs("IgnoreUnknown", "1")) } - if _, err := c.CNI.Setup(context.TODO(), id, nativeID, nsOpts...); err != nil { + cniRes, err := c.CNI.Setup(context.TODO(), id, nativeID, nsOpts...) 
+ if err != nil { deleteNetNS(nativeID) return nil, errors.Wrap(err, "CNI setup error") } trace.SpanFromContext(ctx).AddEvent("finished setting up network namespace") bklog.G(ctx).Debugf("finished setting up network namespace %s", id) - return &cniNS{ + vethName := "" + for k := range cniRes.Interfaces { + if strings.HasPrefix(k, "veth") { + if vethName != "" { + // invalid config + vethName = "" + break + } + vethName = k + } + } + + ns := &cniNS{ nativeID: nativeID, id: id, handle: c.CNI, opts: nsOpts, - }, nil + vethName: vethName, + } + + if ns.vethName != "" { + sample, err := ns.sample() + if err == nil && sample != nil { + ns.canSample = true + ns.offsetSample = sample + } + } + + return ns, nil } type cniNS struct { - pool *cniPool - handle cni.CNI - id string - nativeID string - opts []cni.NamespaceOpts - lastUsed time.Time + pool *cniPool + handle cni.CNI + id string + nativeID string + opts []cni.NamespaceOpts + lastUsed time.Time + vethName string + canSample bool + offsetSample *network.Sample + prevSample *network.Sample } func (ns *cniNS) Set(s *specs.Spec) error { @@ -271,6 +299,9 @@ func (ns *cniNS) Set(s *specs.Spec) error { } func (ns *cniNS) Close() error { + if ns.prevSample != nil { + ns.offsetSample = ns.prevSample + } if ns.pool == nil { return ns.release() } @@ -278,6 +309,30 @@ func (ns *cniNS) Close() error { return nil } +func (ns *cniNS) Sample() (*network.Sample, error) { + if !ns.canSample { + return nil, nil + } + s, err := ns.sample() + if err != nil { + return nil, err + } + if s == nil { + return nil, nil + } + if ns.offsetSample != nil { + s.TxBytes -= ns.offsetSample.TxBytes + s.RxBytes -= ns.offsetSample.RxBytes + s.TxPackets -= ns.offsetSample.TxPackets + s.RxPackets -= ns.offsetSample.RxPackets + s.TxErrors -= ns.offsetSample.TxErrors + s.RxErrors -= ns.offsetSample.RxErrors + s.TxDropped -= ns.offsetSample.TxDropped + s.RxDropped -= ns.offsetSample.RxDropped + } + return s, nil +} + func (ns *cniNS) release() error { 
bklog.L.Debugf("releasing cni network namespace %s", ns.id) err := ns.handle.Remove(context.TODO(), ns.id, ns.nativeID, ns.opts...) diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_linux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_linux.go new file mode 100644 index 0000000000..8c4ac437e1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_linux.go @@ -0,0 +1,70 @@ +package cniprovider + +import ( + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/moby/buildkit/util/network" + "github.com/pkg/errors" +) + +func (ns *cniNS) sample() (*network.Sample, error) { + dirfd, err := syscall.Open(filepath.Join("/sys/class/net", ns.vethName, "statistics"), syscall.O_RDONLY, 0) + if err != nil { + if errors.Is(err, syscall.ENOENT) || errors.Is(err, syscall.ENOTDIR) { + return nil, nil + } + return nil, err + } + defer syscall.Close(dirfd) + + buf := make([]byte, 32) + stat := &network.Sample{} + + for _, name := range []string{"tx_bytes", "rx_bytes", "tx_packets", "rx_packets", "tx_errors", "rx_errors", "tx_dropped", "rx_dropped"} { + n, err := readFileAt(dirfd, name, buf) + if err != nil { + return nil, errors.Wrapf(err, "failed to read %s", name) + } + switch name { + case "tx_bytes": + stat.TxBytes = n + case "rx_bytes": + stat.RxBytes = n + case "tx_packets": + stat.TxPackets = n + case "rx_packets": + stat.RxPackets = n + case "tx_errors": + stat.TxErrors = n + case "rx_errors": + stat.RxErrors = n + case "tx_dropped": + stat.TxDropped = n + case "rx_dropped": + stat.RxDropped = n + } + } + ns.prevSample = stat + return stat, nil +} + +func readFileAt(dirfd int, filename string, buf []byte) (int64, error) { + fd, err := syscall.Openat(dirfd, filename, syscall.O_RDONLY, 0) + if err != nil { + return 0, err + } + defer syscall.Close(fd) + + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return 0, err + } + nn, err := strconv.ParseInt(strings.TrimSpace(string(buf[:n])), 10, 
64) + if err != nil { + return 0, err + } + return nn, nil +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_nolinux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_nolinux.go new file mode 100644 index 0000000000..383798b962 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_nolinux.go @@ -0,0 +1,12 @@ +//go:build !linux +// +build !linux + +package cniprovider + +import ( + "github.com/moby/buildkit/util/network" +) + +func (ns *cniNS) sample() (*network.Sample, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go deleted file mode 100644 index eb6dcacefc..0000000000 --- a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build linux -// +build linux - -package cniprovider - -import ( - _ "unsafe" // required for go:linkname. 
-) - -//go:linkname beforeFork syscall.runtime_BeforeFork -func beforeFork() - -//go:linkname afterFork syscall.runtime_AfterFork -func afterFork() - -//go:linkname afterForkInChild syscall.runtime_AfterForkInChild -func afterForkInChild() diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go index a05bc0a441..e8a3d5054f 100644 --- a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go @@ -4,10 +4,11 @@ package cniprovider import ( + "fmt" "os" "path/filepath" + "runtime" "syscall" - "unsafe" "github.com/containerd/containerd/oci" "github.com/moby/buildkit/util/bklog" @@ -38,7 +39,16 @@ func cleanOldNamespaces(c *cniProvider) { }() } -func createNetNS(c *cniProvider, id string) (string, error) { +// unshareAndMount needs to be called in a separate thread +func unshareAndMountNetNS(target string) error { + if err := syscall.Unshare(syscall.CLONE_NEWNET); err != nil { + return err + } + + return syscall.Mount(fmt.Sprintf("/proc/self/task/%d/ns/net", syscall.Gettid()), target, "", syscall.MS_BIND, "") +} + +func createNetNS(c *cniProvider, id string) (_ string, err error) { nsPath := filepath.Join(c.root, "net/cni", id) if err := os.MkdirAll(filepath.Dir(nsPath), 0700); err != nil { return "", err @@ -46,55 +56,34 @@ func createNetNS(c *cniProvider, id string) (string, error) { f, err := os.Create(nsPath) if err != nil { - deleteNetNS(nsPath) return "", err } - if err := f.Close(); err != nil { - deleteNetNS(nsPath) - return "", err - } - procNetNSBytes, err := syscall.BytePtrFromString("/proc/self/ns/net") - if err != nil { - deleteNetNS(nsPath) - return "", err - } - nsPathBytes, err := syscall.BytePtrFromString(nsPath) - if err != nil { - deleteNetNS(nsPath) - return "", err - } - beforeFork() - - pid, _, errno := syscall.RawSyscall6(syscall.SYS_CLONE, 
uintptr(syscall.SIGCHLD)|unix.CLONE_NEWNET, 0, 0, 0, 0, 0) - if errno != 0 { - afterFork() - deleteNetNS(nsPath) - return "", errno - } - - if pid != 0 { - afterFork() - var ws unix.WaitStatus - _, err = unix.Wait4(int(pid), &ws, 0, nil) - for err == syscall.EINTR { - _, err = unix.Wait4(int(pid), &ws, 0, nil) - } - + defer func() { if err != nil { deleteNetNS(nsPath) - return "", errors.Wrapf(err, "failed to find pid=%d process", pid) } - errno = syscall.Errno(ws.ExitStatus()) - if errno != 0 { - deleteNetNS(nsPath) - return "", errors.Wrapf(errno, "failed to mount %s (pid=%d)", nsPath, pid) - } - return nsPath, nil + }() + if err := f.Close(); err != nil { + return "", err } - afterForkInChild() - _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, uintptr(unsafe.Pointer(procNetNSBytes)), uintptr(unsafe.Pointer(nsPathBytes)), 0, uintptr(unix.MS_BIND), 0, 0) - syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) - panic("unreachable") + + errCh := make(chan error) + + go func() { + defer close(errCh) + runtime.LockOSThread() + + if err := unshareAndMountNetNS(nsPath); err != nil { + errCh <- err + } + + // we leave the thread locked so go runtime terminates the thread + }() + + if err := <-errCh; err != nil { + return "", err + } + return nsPath, nil } func setNetNS(s *specs.Spec, nsPath string) error { diff --git a/vendor/github.com/moby/buildkit/util/network/host.go b/vendor/github.com/moby/buildkit/util/network/host.go index fbd6747d00..d1725dd22a 100644 --- a/vendor/github.com/moby/buildkit/util/network/host.go +++ b/vendor/github.com/moby/buildkit/util/network/host.go @@ -35,3 +35,7 @@ func (h *hostNS) Set(s *specs.Spec) error { func (h *hostNS) Close() error { return nil } + +func (h *hostNS) Sample() (*Sample, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go b/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go index b8d733ec32..d521739322 100644 --- 
a/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go +++ b/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go @@ -4,8 +4,8 @@ package netproviders import ( + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/network" - "github.com/sirupsen/logrus" ) func getHostProvider() (network.Provider, bool) { @@ -13,6 +13,6 @@ func getHostProvider() (network.Provider, bool) { } func getFallback() (network.Provider, string) { - logrus.Warn("using host network as the default") + bklog.L.Warn("using host network as the default") return network.NewHostProvider(), "host" } diff --git a/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go b/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go index c7e460e333..0a17a36db3 100644 --- a/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go +++ b/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go @@ -4,8 +4,8 @@ package netproviders import ( + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/network" - "github.com/sirupsen/logrus" ) func getHostProvider() (network.Provider, bool) { @@ -13,6 +13,6 @@ func getHostProvider() (network.Provider, bool) { } func getFallback() (network.Provider, string) { - logrus.Warn("using null network as the default") + bklog.L.Warn("using null network as the default") return network.NewNoneProvider(), "" } diff --git a/vendor/github.com/moby/buildkit/util/network/network.go b/vendor/github.com/moby/buildkit/util/network/network.go index c48f1984f0..4ff1bb81c3 100644 --- a/vendor/github.com/moby/buildkit/util/network/network.go +++ b/vendor/github.com/moby/buildkit/util/network/network.go @@ -7,6 +7,17 @@ import ( specs "github.com/opencontainers/runtime-spec/specs-go" ) +type Sample struct { + RxBytes int64 `json:"rxBytes,omitempty"` + RxPackets int64 `json:"rxPackets,omitempty"` + RxErrors int64 `json:"rxErrors,omitempty"` + 
RxDropped int64 `json:"rxDropped,omitempty"` + TxBytes int64 `json:"txBytes,omitempty"` + TxPackets int64 `json:"txPackets,omitempty"` + TxErrors int64 `json:"txErrors,omitempty"` + TxDropped int64 `json:"txDropped,omitempty"` +} + // Provider interface for Network type Provider interface { io.Closer @@ -18,4 +29,6 @@ type Namespace interface { io.Closer // Set the namespace on the spec Set(*specs.Spec) error + + Sample() (*Sample, error) } diff --git a/vendor/github.com/moby/buildkit/util/network/none.go b/vendor/github.com/moby/buildkit/util/network/none.go index e2b9d122d6..954229b059 100644 --- a/vendor/github.com/moby/buildkit/util/network/none.go +++ b/vendor/github.com/moby/buildkit/util/network/none.go @@ -31,3 +31,7 @@ func (h *noneNS) Set(s *specs.Spec) error { func (h *noneNS) Close() error { return nil } + +func (h *noneNS) Sample() (*Sample, error) { + return nil, nil +} diff --git a/vendor/github.com/moby/buildkit/util/overlay/overlay.go b/vendor/github.com/moby/buildkit/util/overlay/overlay.go new file mode 100644 index 0000000000..b472034c74 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/overlay/overlay.go @@ -0,0 +1,8 @@ +package overlay + +import "github.com/containerd/containerd/mount" + +// IsOverlayMountType returns true if the mount type is overlay-based +func IsOverlayMountType(mnt mount.Mount) bool { + return mnt.Type == "overlay" +} diff --git a/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go b/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go index f2f69bba06..62179f9ce8 100644 --- a/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go +++ b/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go @@ -13,6 +13,7 @@ import ( "strings" "sync" "syscall" + "time" "github.com/containerd/containerd/archive" "github.com/containerd/containerd/mount" @@ -38,24 +39,23 @@ func GetUpperdir(lower, upper []mount.Mount) (string, error) { // Get layer directories of lower snapshot var lowerlayers []string 
lowerM := lower[0] - switch lowerM.Type { - case "bind": + if lowerM.Type == "bind" { // lower snapshot is a bind mount of one layer lowerlayers = []string{lowerM.Source} - case "overlay": + } else if IsOverlayMountType(lowerM) { // lower snapshot is an overlay mount of multiple layers var err error lowerlayers, err = GetOverlayLayers(lowerM) if err != nil { return "", err } - default: + } else { return "", errors.Errorf("cannot get layer information from mount option (type = %q)", lowerM.Type) } // Get layer directories of upper snapshot upperM := upper[0] - if upperM.Type != "overlay" { + if !IsOverlayMountType(upperM) { return "", errors.Errorf("upper snapshot isn't overlay mounted (type = %q)", upperM.Type) } upperlayers, err := GetOverlayLayers(upperM) @@ -127,7 +127,8 @@ func WriteUpperdir(ctx context.Context, w io.Writer, upperdir string, lower []mo } return mount.WithTempMount(ctx, lower, func(lowerRoot string) error { return mount.WithTempMount(ctx, upperView, func(upperViewRoot string) error { - cw := archive.NewChangeWriter(&cancellableWriter{ctx, w}, upperViewRoot) + // WithWhiteoutTime(0) will no longer need to be specified when https://github.com/containerd/containerd/pull/8764 gets merged + cw := archive.NewChangeWriter(&cancellableWriter{ctx, w}, upperViewRoot, archive.WithWhiteoutTime(time.Unix(0, 0).UTC())) if err := Changes(ctx, cw.HandleChange, upperdir, upperViewRoot, lowerRoot); err != nil { if err2 := cw.Close(); err2 != nil { return errors.Wrapf(err, "failed to record upperdir changes (close error: %v)", err2) diff --git a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go index a856db8caa..7cce8a7ca7 100644 --- a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go +++ b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go @@ -65,7 +65,7 @@ func (ps *MultiWriter) Write(id string, v interface{}) error { Sys: v, meta: ps.meta, } - return 
ps.WriteRawProgress(p) + return ps.writeRawProgress(p) } func (ps *MultiWriter) WriteRawProgress(p *Progress) error { diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go b/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go index f6d3174769..9758f6b5d6 100644 --- a/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go +++ b/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go @@ -6,8 +6,8 @@ import ( "strconv" "strings" + "github.com/moby/buildkit/util/bklog" "github.com/morikuni/aec" - "github.com/sirupsen/logrus" ) var termColorMap = map[string]aec.ANSI{ @@ -41,7 +41,7 @@ func setUserDefinedTermColors(colorsEnv string) { k, v, ok := strings.Cut(field, "=") if !ok || strings.Contains(v, "=") { err := errors.New("A valid entry must have exactly two fields") - logrus.WithError(err).Warnf("Could not parse BUILDKIT_COLORS component: %s", field) + bklog.L.WithError(err).Warnf("Could not parse BUILDKIT_COLORS component: %s", field) continue } k = strings.ToLower(k) @@ -53,7 +53,7 @@ func setUserDefinedTermColors(colorsEnv string) { } } else { err := errors.New("Colors must be a name from the pre-defined list or a valid 3-part RGB value") - logrus.WithError(err).Warnf("Unknown color value found in BUILDKIT_COLORS: %s=%s", k, v) + bklog.L.WithError(err).Warnf("Unknown color value found in BUILDKIT_COLORS: %s=%s", k, v) } } } @@ -63,7 +63,7 @@ func readBuildkitColorsEnv(colorsEnv string) []string { csvReader.Comma = ':' fields, err := csvReader.Read() if err != nil { - logrus.WithError(err).Warnf("Could not parse BUILDKIT_COLORS. Falling back to defaults.") + bklog.L.WithError(err).Warnf("Could not parse BUILDKIT_COLORS. 
Falling back to defaults.") return nil } return fields @@ -73,12 +73,12 @@ func readRGB(v string) aec.ANSI { csvReader := csv.NewReader(strings.NewReader(v)) fields, err := csvReader.Read() if err != nil { - logrus.WithError(err).Warnf("Could not parse value %s as valid comma-separated RGB color. Ignoring.", v) + bklog.L.WithError(err).Warnf("Could not parse value %s as valid comma-separated RGB color. Ignoring.", v) return nil } if len(fields) != 3 { err = errors.New("A valid RGB color must have three fields") - logrus.WithError(err).Warnf("Could not parse value %s as valid RGB color. Ignoring.", v) + bklog.L.WithError(err).Warnf("Could not parse value %s as valid RGB color. Ignoring.", v) return nil } ok := isValidRGB(fields) @@ -103,7 +103,7 @@ func parseKeys(k string, c aec.ANSI) { case "warning": colorWarning = c default: - logrus.Warnf("Unknown key found in BUILDKIT_COLORS (expected: run, cancel, error, or warning): %s", k) + bklog.L.Warnf("Unknown key found in BUILDKIT_COLORS (expected: run, cancel, error, or warning): %s", k) } } @@ -111,14 +111,14 @@ func isValidRGB(s []string) bool { for _, n := range s { num, err := strconv.Atoi(n) if err != nil { - logrus.Warnf("A field in BUILDKIT_COLORS appears to contain an RGB value that is not an integer: %s", strings.Join(s, ",")) + bklog.L.Warnf("A field in BUILDKIT_COLORS appears to contain an RGB value that is not an integer: %s", strings.Join(s, ",")) return false } ok := isValidRGBValue(num) if ok { continue } else { - logrus.Warnf("A field in BUILDKIT_COLORS appears to contain an RGB value that is not within the valid range of 0-255: %s", strings.Join(s, ",")) + bklog.L.Warnf("A field in BUILDKIT_COLORS appears to contain an RGB value that is not within the valid range of 0-255: %s", strings.Join(s, ",")) return false } } diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go b/vendor/github.com/moby/buildkit/util/progress/progressui/display.go index edbdaaa75e..4ceb4f5264 100644 
--- a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go +++ b/vendor/github.com/moby/buildkit/util/progress/progressui/display.go @@ -21,11 +21,37 @@ import ( "golang.org/x/time/rate" ) -func DisplaySolveStatus(ctx context.Context, phase string, c console.Console, w io.Writer, ch chan *client.SolveStatus) ([]client.VertexWarning, error) { +type displaySolveStatusOpts struct { + phase string + textDesc string + consoleDesc string +} + +type DisplaySolveStatusOpt func(b *displaySolveStatusOpts) + +func WithPhase(phase string) DisplaySolveStatusOpt { + return func(b *displaySolveStatusOpts) { + b.phase = phase + } +} + +func WithDesc(text string, console string) DisplaySolveStatusOpt { + return func(b *displaySolveStatusOpts) { + b.textDesc = text + b.consoleDesc = console + } +} + +func DisplaySolveStatus(ctx context.Context, c console.Console, w io.Writer, ch chan *client.SolveStatus, opts ...DisplaySolveStatusOpt) ([]client.VertexWarning, error) { modeConsole := c != nil - disp := &display{c: c, phase: phase} - printer := &textMux{w: w} + dsso := &displaySolveStatusOpts{} + for _, opt := range opts { + opt(dsso) + } + + disp := &display{c: c, phase: dsso.phase, desc: dsso.consoleDesc} + printer := &textMux{w: w, desc: dsso.textDesc} if disp.phase == "" { disp.phase = "Building" @@ -556,7 +582,7 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) { } else if sec < 100 { prec = 2 } - v.logs = append(v.logs, []byte(fmt.Sprintf("#%d %s %s", v.index, fmt.Sprintf("%.[2]*[1]f", sec, prec), dt))) + v.logs = append(v.logs, []byte(fmt.Sprintf("%s %s", fmt.Sprintf("%.[2]*[1]f", sec, prec), dt))) } i++ }) @@ -711,6 +737,7 @@ func addTime(tm *time.Time, d time.Duration) *time.Time { type display struct { c console.Console phase string + desc string lineCount int repeated bool } @@ -784,7 +811,11 @@ func (disp *display) print(d displayInfo, width, height int, all bool) { defer fmt.Fprint(disp.c, aec.Show) out := fmt.Sprintf("[+] %s %.1fs (%d/%d) 
%s", disp.phase, time.Since(d.startTime).Seconds(), d.countCompleted, d.countTotal, statusStr) - out = align(out, "", width) + if disp.desc != "" { + out = align(out, disp.desc, width-1) + } else { + out = align(out, "", width) + } fmt.Fprintln(disp.c, out) lineCount := 0 for _, j := range d.jobs { diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go b/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go index cc8e45be29..338079d474 100644 --- a/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go +++ b/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go @@ -32,6 +32,7 @@ type textMux struct { last map[string]lastStatus notFirst bool nextIndex int + desc string } func (p *textMux) printVtx(t *trace, dgst digest.Digest) { @@ -63,6 +64,9 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) { if p.notFirst { fmt.Fprintln(p.w, "") } else { + if p.desc != "" { + fmt.Fprintf(p.w, "#0 %s\n\n", p.desc) + } p.notFirst = true } @@ -139,10 +143,13 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) { } for i, l := range v.logs { - if i == 0 { + if i == 0 && v.logsOffset != 0 { // index has already been printed l = l[v.logsOffset:] + fmt.Fprintf(p.w, "%s", l) + } else { + fmt.Fprintf(p.w, "#%d %s", v.index, []byte(l)) } - fmt.Fprintf(p.w, "%s", []byte(l)) + if i != len(v.logs)-1 || !v.logsPartial { fmt.Fprintln(p.w, "") } diff --git a/vendor/github.com/moby/buildkit/util/pull/pull.go b/vendor/github.com/moby/buildkit/util/pull/pull.go index c66c4e784a..9527953c48 100644 --- a/vendor/github.com/moby/buildkit/util/pull/pull.go +++ b/vendor/github.com/moby/buildkit/util/pull/pull.go @@ -10,7 +10,7 @@ import ( "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" - "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // SA1019 
deprecated "github.com/moby/buildkit/session" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/flightcontrol" @@ -32,7 +32,7 @@ type Puller struct { Src reference.Spec Platform ocispecs.Platform - g flightcontrol.Group + g flightcontrol.Group[struct{}] resolveErr error resolveDone bool desc ocispecs.Descriptor @@ -54,9 +54,9 @@ type PulledManifests struct { } func (p *Puller) resolve(ctx context.Context, resolver remotes.Resolver) error { - _, err := p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) { + _, err := p.g.Do(ctx, "", func(ctx context.Context) (_ struct{}, err error) { if p.resolveErr != nil || p.resolveDone { - return nil, p.resolveErr + return struct{}{}, p.resolveErr } defer func() { if !errors.Is(err, context.Canceled) { @@ -68,12 +68,12 @@ func (p *Puller) resolve(ctx context.Context, resolver remotes.Resolver) error { } ref, desc, err := resolver.Resolve(ctx, p.Src.String()) if err != nil { - return nil, err + return struct{}{}, err } p.desc = desc p.ref = ref p.resolveDone = true - return nil, nil + return struct{}{}, nil }) return err } @@ -233,15 +233,15 @@ func filterLayerBlobs(metadata map[digest.Digest]ocispecs.Descriptor, mu sync.Lo switch desc.MediaType { case ocispecs.MediaTypeImageLayer, - ocispecs.MediaTypeImageLayerNonDistributable, + ocispecs.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerForeign, ocispecs.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip, - ocispecs.MediaTypeImageLayerNonDistributableGzip, + ocispecs.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. 
images.MediaTypeDockerSchema2LayerForeignGzip, ocispecs.MediaTypeImageLayerZstd, - ocispecs.MediaTypeImageLayerNonDistributableZstd: + ocispecs.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // ignore SA1019: Non-distributable layers are deprecated, and not recommended for future use. return nil, images.ErrSkipDesc default: if metadata != nil { diff --git a/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go b/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go index 93c50106f7..ee8fcadb20 100644 --- a/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go +++ b/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go @@ -8,6 +8,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/remotes" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/progress" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -33,13 +34,14 @@ func (p *ProviderWithProgress) ReaderAt(ctx context.Context, desc ocispecs.Descr ctx, cancel := context.WithCancel(ctx) doneCh := make(chan struct{}) go trackProgress(ctx, desc, p.Manager, doneCh) - return readerAtWithCancel{ReaderAt: ra, cancel: cancel, doneCh: doneCh}, nil + return readerAtWithCancel{ReaderAt: ra, cancel: cancel, doneCh: doneCh, logger: bklog.G(ctx)}, nil } type readerAtWithCancel struct { content.ReaderAt cancel func() doneCh <-chan struct{} + logger *logrus.Entry } func (ra readerAtWithCancel) Close() error { @@ -47,7 +49,7 @@ func (ra readerAtWithCancel) Close() error { select { case <-ra.doneCh: case <-time.After(time.Second): - logrus.Warn("timeout waiting for pull progress to complete") + ra.logger.Warn("timeout waiting for pull progress to complete") } return ra.ReaderAt.Close() } @@ -66,13 +68,14 @@ func (f *FetcherWithProgress) Fetch(ctx context.Context, desc ocispecs.Descripto ctx, cancel := context.WithCancel(ctx) doneCh := 
make(chan struct{}) go trackProgress(ctx, desc, f.Manager, doneCh) - return readerWithCancel{ReadCloser: rc, cancel: cancel, doneCh: doneCh}, nil + return readerWithCancel{ReadCloser: rc, cancel: cancel, doneCh: doneCh, logger: bklog.G(ctx)}, nil } type readerWithCancel struct { io.ReadCloser cancel func() doneCh <-chan struct{} + logger *logrus.Entry } func (r readerWithCancel) Close() error { @@ -80,7 +83,7 @@ func (r readerWithCancel) Close() error { select { case <-r.doneCh: case <-time.After(time.Second): - logrus.Warn("timeout waiting for pull progress to complete") + r.logger.Warn("timeout waiting for pull progress to complete") } return r.ReadCloser.Close() } @@ -90,10 +93,10 @@ func trackProgress(ctx context.Context, desc ocispecs.Descriptor, manager PullMa ticker := time.NewTicker(150 * time.Millisecond) defer ticker.Stop() - go func() { + go func(ctx context.Context) { <-ctx.Done() ticker.Stop() - }() + }(ctx) pw, _, _ := progress.NewFromContext(ctx) defer pw.Close() @@ -120,7 +123,7 @@ func trackProgress(ctx context.Context, desc ocispecs.Descriptor, manager PullMa }) continue } else if !errors.Is(err, errdefs.ErrNotFound) { - logrus.Errorf("unexpected error getting ingest status of %q: %v", ingestRef, err) + bklog.G(ctx).Errorf("unexpected error getting ingest status of %q: %v", ingestRef, err) return } diff --git a/vendor/github.com/moby/buildkit/util/purl/image.go b/vendor/github.com/moby/buildkit/util/purl/image.go index b3364ba4ce..9eb53f6840 100644 --- a/vendor/github.com/moby/buildkit/util/purl/image.go +++ b/vendor/github.com/moby/buildkit/util/purl/image.go @@ -14,7 +14,7 @@ import ( // RefToPURL converts an image reference with optional platform constraint to a package URL. 
// Image references are defined in https://github.com/distribution/distribution/blob/v2.8.1/reference/reference.go#L1 // Package URLs are defined in https://github.com/package-url/purl-spec -func RefToPURL(ref string, platform *ocispecs.Platform) (string, error) { +func RefToPURL(purlType string, ref string, platform *ocispecs.Platform) (string, error) { named, err := reference.ParseNormalizedNamed(ref) if err != nil { return "", errors.Wrapf(err, "failed to parse ref %q", ref) @@ -52,7 +52,7 @@ func RefToPURL(ref string, platform *ocispecs.Platform) (string, error) { }) } - p := packageurl.NewPackageURL("docker", ns, name, version, qualifiers, "") + p := packageurl.NewPackageURL(purlType, ns, name, version, qualifiers, "") return p.ToString(), nil } diff --git a/vendor/github.com/moby/buildkit/util/push/push.go b/vendor/github.com/moby/buildkit/util/push/push.go index 881b2fd86f..bef56e5ba3 100644 --- a/vendor/github.com/moby/buildkit/util/push/push.go +++ b/vendor/github.com/moby/buildkit/util/push/push.go @@ -14,8 +14,10 @@ import ( "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/docker/distribution/reference" + intoto "github.com/in-toto/in-toto-golang/in_toto" "github.com/moby/buildkit/session" - "github.com/moby/buildkit/util/attestation" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/util/progress" @@ -27,7 +29,6 @@ import ( digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type pusher struct { @@ -46,6 +47,7 @@ func Pusher(ctx context.Context, resolver remotes.Resolver, ref string) (remotes } func Push(ctx context.Context, sm *session.Manager, sid string, provider content.Provider, manager content.Manager, dgst digest.Digest, ref string, 
insecure bool, hosts docker.RegistryHosts, byDigest bool, annotations map[digest.Digest]map[string]string) error { + ctx = contentutil.RegisterContentPayloadTypes(ctx) desc := ocispecs.Descriptor{ Digest: dgst, } @@ -250,11 +252,11 @@ func childrenHandler(provider content.Provider) images.HandlerFunc { case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip, - attestation.MediaTypeDockerSchema2AttestationType: + intoto.PayloadType: // childless data types. return nil, nil default: - logrus.Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) + bklog.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) } return descs, nil @@ -289,7 +291,7 @@ func updateDistributionSourceHandler(manager content.Manager, pushF images.Handl // update distribution source to layer if islayer { if _, err := updateF(ctx, desc); err != nil { - logrus.Warnf("failed to update distribution source for layer %v: %v", desc.Digest, err) + bklog.G(ctx).Warnf("failed to update distribution source for layer %v: %v", desc.Digest, err) } } return children, nil @@ -297,12 +299,12 @@ func updateDistributionSourceHandler(manager content.Manager, pushF images.Handl } func dedupeHandler(h images.HandlerFunc) images.HandlerFunc { - var g flightcontrol.Group + var g flightcontrol.Group[[]ocispecs.Descriptor] res := map[digest.Digest][]ocispecs.Descriptor{} var mu sync.Mutex return images.HandlerFunc(func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { - res, err := g.Do(ctx, desc.Digest.String(), func(ctx context.Context) (interface{}, error) { + return g.Do(ctx, desc.Digest.String(), func(ctx context.Context) ([]ocispecs.Descriptor, error) { mu.Lock() if r, ok := res[desc.Digest]; ok { mu.Unlock() @@ -320,12 +322,5 @@ func dedupeHandler(h images.HandlerFunc) 
images.HandlerFunc { mu.Unlock() return children, nil }) - if err != nil { - return nil, err - } - if res == nil { - return nil, nil - } - return res.([]ocispecs.Descriptor), nil }) } diff --git a/vendor/github.com/moby/buildkit/util/resolver/authorizer.go b/vendor/github.com/moby/buildkit/util/resolver/authorizer.go index d97d32dd6f..6c89cf7419 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/authorizer.go +++ b/vendor/github.com/moby/buildkit/util/resolver/authorizer.go @@ -33,7 +33,7 @@ type authHandlerNS struct { hosts map[string][]docker.RegistryHost muHosts sync.Mutex sm *session.Manager - g flightcontrol.Group + g flightcontrol.Group[[]docker.RegistryHost] } func newAuthHandlerNS(sm *session.Manager) *authHandlerNS { @@ -230,7 +230,7 @@ type authResult struct { // authHandler is used to handle auth request per registry server. type authHandler struct { - g flightcontrol.Group + g flightcontrol.Group[*authResult] client *http.Client @@ -295,7 +295,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context, sm *session.Manager, g // Docs: https://docs.docker.com/registry/spec/auth/scope scoped := strings.Join(to.Scopes, " ") - res, err := ah.g.Do(ctx, scoped, func(ctx context.Context) (interface{}, error) { + res, err := ah.g.Do(ctx, scoped, func(ctx context.Context) (*authResult, error) { ah.scopedTokensMu.Lock() r, exist := ah.scopedTokens[scoped] ah.scopedTokensMu.Unlock() @@ -313,15 +313,10 @@ func (ah *authHandler) doBearerAuth(ctx context.Context, sm *session.Manager, g ah.scopedTokensMu.Unlock() return r, nil }) - if err != nil || res == nil { return "", err } - r := res.(*authResult) - if r == nil { - return "", nil - } - return r.token, nil + return res.token, nil } func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g session.Group, to auth.TokenOptions) (r *authResult, err error) { diff --git a/vendor/github.com/moby/buildkit/util/resolver/limited/group.go 
b/vendor/github.com/moby/buildkit/util/resolver/limited/group.go index 7fdd947a02..934bd4f4eb 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/limited/group.go +++ b/vendor/github.com/moby/buildkit/util/resolver/limited/group.go @@ -11,8 +11,8 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/remotes" "github.com/docker/distribution/reference" + "github.com/moby/buildkit/util/bklog" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) @@ -119,7 +119,7 @@ func (f *fetcher) Fetch(ctx context.Context, desc ocispecs.Descriptor) (io.ReadC rcw := &readCloser{ReadCloser: rc} closer := func() { if !rcw.closed { - logrus.Warnf("fetcher not closed cleanly: %s", desc.Digest) + bklog.G(ctx).Warnf("fetcher not closed cleanly: %s", desc.Digest) } release() } diff --git a/vendor/github.com/moby/buildkit/util/resolver/pool.go b/vendor/github.com/moby/buildkit/util/resolver/pool.go index 292ca2e614..7b6a2ef50d 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/pool.go +++ b/vendor/github.com/moby/buildkit/util/resolver/pool.go @@ -131,7 +131,7 @@ type Resolver struct { // HostsFunc implements registry configuration of this Resolver func (r *Resolver) HostsFunc(host string) ([]docker.RegistryHost, error) { return func(domain string) ([]docker.RegistryHost, error) { - v, err := r.handler.g.Do(context.TODO(), domain, func(ctx context.Context) (interface{}, error) { + v, err := r.handler.g.Do(context.TODO(), domain, func(ctx context.Context) ([]docker.RegistryHost, error) { // long lock not needed because flightcontrol.Do r.handler.muHosts.Lock() v, ok := r.handler.hosts[domain] @@ -151,13 +151,12 @@ func (r *Resolver) HostsFunc(host string) ([]docker.RegistryHost, error) { if err != nil || v == nil { return nil, err } - vv := v.([]docker.RegistryHost) - if len(vv) == 0 { + if len(v) == 0 { return nil, nil } // make a copy so authorizer is set on unique instance 
- res := make([]docker.RegistryHost, len(vv)) - copy(res, vv) + res := make([]docker.RegistryHost, len(v)) + copy(res, v) auth := newDockerAuthorizer(res[0].Client, r.handler, r.sm, r.g) for i := range res { res[i].Authorizer = auth diff --git a/vendor/github.com/moby/buildkit/util/resolver/resolver.go b/vendor/github.com/moby/buildkit/util/resolver/resolver.go index 0639a1b623..0abe597e08 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/resolver.go +++ b/vendor/github.com/moby/buildkit/util/resolver/resolver.go @@ -6,6 +6,7 @@ import ( "net" "net/http" "os" + "path" "path/filepath" "runtime" "strings" @@ -17,6 +18,10 @@ import ( "github.com/pkg/errors" ) +const ( + defaultPath = "/v2" +) + func fillInsecureOpts(host string, c config.RegistryConfig, h docker.RegistryHost) ([]docker.RegistryHost, error) { var hosts []docker.RegistryHost @@ -126,14 +131,7 @@ func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts var out []docker.RegistryHost for _, mirror := range c.Mirrors { - h := docker.RegistryHost{ - Scheme: "https", - Client: newDefaultClient(), - Host: mirror, - Path: "/v2", - Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve, - } - + h := newMirrorRegistryHost(mirror) hosts, err := fillInsecureOpts(mirror, m[mirror], h) if err != nil { return nil, err @@ -169,6 +167,20 @@ func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts ) } +func newMirrorRegistryHost(mirror string) docker.RegistryHost { + mirrorHost, mirrorPath := extractMirrorHostAndPath(mirror) + path := path.Join(defaultPath, mirrorPath) + h := docker.RegistryHost{ + Scheme: "https", + Client: newDefaultClient(), + Host: mirrorHost, + Path: path, + Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve, + } + + return h +} + func newDefaultClient() *http.Client { return &http.Client{ Transport: tracing.NewTransport(newDefaultTransport()), diff --git 
a/vendor/github.com/moby/buildkit/util/resolver/utils.go b/vendor/github.com/moby/buildkit/util/resolver/utils.go new file mode 100644 index 0000000000..cdcd5b83d6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/resolver/utils.go @@ -0,0 +1,22 @@ +package resolver + +import ( + "fmt" + "net/url" + "strings" +) + +func extractMirrorHostAndPath(mirror string) (string, string) { + var path string + host := mirror + + u, err := url.Parse(mirror) + if err != nil || u.Host == "" { + u, err = url.Parse(fmt.Sprintf("//%s", mirror)) + } + if err != nil || u.Host == "" { + return host, path + } + + return u.Host, strings.TrimRight(u.Path, "/") +} diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.go b/vendor/github.com/moby/buildkit/util/stack/stack.go index 18d03630b4..fb9fc3ddf5 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.go @@ -9,7 +9,7 @@ import ( "strings" "sync" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" "github.com/pkg/errors" ) diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go index c4a73a68f4..43809d4876 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 +// protoc-gen-go v1.30.0 // protoc v3.11.4 // source: stack.proto diff --git a/vendor/github.com/moby/buildkit/util/system/path.go b/vendor/github.com/moby/buildkit/util/system/path.go index f6dc70dc8d..94f9a826f2 100644 --- a/vendor/github.com/moby/buildkit/util/system/path.go +++ b/vendor/github.com/moby/buildkit/util/system/path.go @@ -1,5 +1,13 @@ package system +import ( + "path" + "path/filepath" + "strings" + + "github.com/pkg/errors" +) + // DefaultPathEnvUnix is unix style list of directories to search for // executables. 
Each directory is separated from the next by a colon // ':' character . @@ -16,3 +24,201 @@ func DefaultPathEnv(os string) string { } return DefaultPathEnvUnix } + +// NormalizePath cleans the path based on the operating system the path is meant for. +// It takes into account a potential parent path, and will join the path to the parent +// if the path is relative. Additionally, it will apply the following rules: +// - always return an absolute path +// - always strip drive letters for Windows paths +// - optionally keep the trailing slashes on paths +// - paths are returned using forward slashes +func NormalizePath(parent, newPath, inputOS string, keepSlash bool) (string, error) { + if inputOS == "" { + inputOS = "linux" + } + + newPath = ToSlash(newPath, inputOS) + parent = ToSlash(parent, inputOS) + origPath := newPath + + if parent == "" { + parent = "/" + } + + var err error + parent, err = CheckSystemDriveAndRemoveDriveLetter(parent, inputOS) + if err != nil { + return "", errors.Wrap(err, "removing drive letter") + } + + if !IsAbs(parent, inputOS) { + parent = path.Join("/", parent) + } + + if newPath == "" { + // New workdir is empty. Use the "current" workdir. It should already + // be an absolute path. + newPath = parent + } + + newPath, err = CheckSystemDriveAndRemoveDriveLetter(newPath, inputOS) + if err != nil { + return "", errors.Wrap(err, "removing drive letter") + } + + if !IsAbs(newPath, inputOS) { + // The new WD is relative. Join it to the previous WD. + newPath = path.Join(parent, newPath) + } + + if keepSlash { + if strings.HasSuffix(origPath, "/") && !strings.HasSuffix(newPath, "/") { + newPath += "/" + } else if strings.HasSuffix(origPath, "/.") { + if newPath != "/" { + newPath += "/" + } + newPath += "." 
+ } + } + + return ToSlash(newPath, inputOS), nil +} + +func ToSlash(inputPath, inputOS string) string { + if inputOS != "windows" { + return inputPath + } + return strings.Replace(inputPath, "\\", "/", -1) +} + +func FromSlash(inputPath, inputOS string) string { + separator := "/" + if inputOS == "windows" { + separator = "\\" + } + return strings.Replace(inputPath, "/", separator, -1) +} + +// NormalizeWorkdir will return a normalized version of the new workdir, given +// the currently configured workdir and the desired new workdir. When setting a +// new relative workdir, it will be joined to the previous workdir or default to +// the root folder. +// On Windows we remove the drive letter and convert the path delimiter to "\". +// Paths that begin with os.PathSeparator are considered absolute even on Windows. +func NormalizeWorkdir(current, wd string, inputOS string) (string, error) { + if inputOS == "" { + inputOS = "linux" + } + + wd, err := NormalizePath(current, wd, inputOS, false) + if err != nil { + return "", errors.Wrap(err, "normalizing working directory") + } + + // Make sure we use the platform specific path separator. HCS does not like forward + // slashes in CWD. + return FromSlash(wd, inputOS), nil +} + +// IsAbs returns a boolean value indicating whether or not the path +// is absolute. On Linux, this is just a wrapper for filepath.IsAbs(). +// On Windows, we strip away the drive letter (if any), clean the path, +// and check whether or not the path starts with a filepath.Separator. +// This function is meant to check if a path is absolute, in the context +// of a COPY, ADD or WORKDIR, which have their root set in the mount point +// of the writable layer we are mutating. 
The filepath.IsAbs() function on +// Windows will not work in these scenarios, as it will return true for paths +// that: +// - Begin with drive letter (DOS style paths) +// - Are volume paths \\?\Volume{UUID} +// - Are UNC paths +func IsAbs(pth, inputOS string) bool { + if inputOS == "" { + inputOS = "linux" + } + cleanedPath, err := CheckSystemDriveAndRemoveDriveLetter(pth, inputOS) + if err != nil { + return false + } + cleanedPath = ToSlash(cleanedPath, inputOS) + // We stripped any potential drive letter and converted any backslashes to + // forward slashes. We can safely use path.IsAbs() for both Windows and Linux. + return path.IsAbs(cleanedPath) +} + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// For linux, this is a no-op. +// +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. It also converts any backslash to forward slash. The conversion +// to OS specific separator should happen as late as possible (ie: before passing the +// value to the function that will actually use it). Paths are parsed and code paths are +// triggered starting with the client and all the way down to calling into the runtime +// environment. The client may run on a foreign OS from the one the build will be triggered +// (Windows clients connecting to Linux or vice versa). +// Keeping the file separator consistent until the last moment is desirable. +// +// We need the Windows path without the drive letter so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:somepath --> somepath // This is a relative path to the CWD set for that drive letter +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +// +// UNC paths can refer to multiple types of paths. 
From local filesystem paths, +// to remote filesystems like SMB or named pipes. +// There is no sane way to support this without adding a lot of complexity +// which I am not sure is worth it. +// \\.\C$\a --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string, error) { + if inputOS == "" { + inputOS = "linux" + } + + if inputOS != "windows" { + return path, nil + } + + if len(path) == 2 && string(path[1]) == ":" { + return "", errors.Errorf("No relative path specified in %q", path) + } + + // UNC paths should error out + if len(path) >= 2 && ToSlash(path[:2], inputOS) == "//" { + return "", errors.Errorf("UNC paths are not supported") + } + + parts := strings.SplitN(path, ":", 2) + // Path does not have a drive letter. Just return it. + if len(parts) < 2 { + return ToSlash(filepath.Clean(path), inputOS), nil + } + + // We expect all paths to be in C: + if !strings.EqualFold(parts[0], "c") { + return "", errors.New("The specified path is not on the system drive (C:)") + } + + // A path of the form F:somepath, is a path that is relative CWD set for a particular + // drive letter. See: + // https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file#fully-qualified-vs-relative-paths + // + // C:\>mkdir F:somepath + // C:\>dir F:\ + // Volume in drive F is New Volume + // Volume Serial Number is 86E5-AB64 + // + // Directory of F:\ + // + // 11/27/2022 02:22 PM somepath + // 0 File(s) 0 bytes + // 1 Dir(s) 1,052,876,800 bytes free + // + // We must return the second element of the split path, as is, without attempting to convert + // it to an absolute path. We have no knowledge of the CWD; that is treated elsewhere. 
+ return ToSlash(filepath.Clean(parts[1]), inputOS), nil +} diff --git a/vendor/github.com/moby/buildkit/util/system/path_unix.go b/vendor/github.com/moby/buildkit/util/system/path_unix.go deleted file mode 100644 index ff01143eef..0000000000 --- a/vendor/github.com/moby/buildkit/util/system/path_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !windows -// +build !windows - -package system - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go deleted file mode 100644 index cc7b664d8b..0000000000 --- a/vendor/github.com/moby/buildkit/util/system/path_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -//go:build windows -// +build windows - -package system - -import ( - "path/filepath" - "strings" - - "github.com/pkg/errors" -) - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be contatenated with -// a Windows long-path which doesn't support drive-letters. 
Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", errors.Errorf("No relative path specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", errors.New("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go b/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go index 13e54bdefc..27c969180a 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go +++ b/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go @@ -12,7 +12,7 @@ import ( "github.com/pkg/errors" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" ) @@ -79,12 +79,6 @@ func getExporter() (sdktrace.SpanExporter, error) { return nil, err } - if exp != nil { - exp = &threadSafeExporterWrapper{ - exporter: exp, - } - } - if Recorder != nil { Recorder.SpanExporter = exp exp = Recorder diff --git a/vendor/github.com/moby/buildkit/util/tracing/detect/threadsafe.go b/vendor/github.com/moby/buildkit/util/tracing/detect/threadsafe.go deleted file mode 100644 index 51d14448df..0000000000 --- a/vendor/github.com/moby/buildkit/util/tracing/detect/threadsafe.go +++ /dev/null @@ -1,26 +0,0 @@ -package detect - -import ( - "context" - "sync" - - sdktrace "go.opentelemetry.io/otel/sdk/trace" -) - -// threadSafeExporterWrapper wraps an OpenTelemetry SpanExporter and makes it thread-safe. 
-type threadSafeExporterWrapper struct { - mu sync.Mutex - exporter sdktrace.SpanExporter -} - -func (tse *threadSafeExporterWrapper) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { - tse.mu.Lock() - defer tse.mu.Unlock() - return tse.exporter.ExportSpans(ctx, spans) -} - -func (tse *threadSafeExporterWrapper) Shutdown(ctx context.Context) error { - tse.mu.Lock() - defer tse.mu.Unlock() - return tse.exporter.Shutdown(ctx) -} diff --git a/vendor/github.com/moby/buildkit/util/tracing/tracing.go b/vendor/github.com/moby/buildkit/util/tracing/tracing.go index fd7f0ba7d5..97f538f575 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/tracing.go +++ b/vendor/github.com/moby/buildkit/util/tracing/tracing.go @@ -12,7 +12,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" ) diff --git a/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go b/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go index 216a364c63..6a965f0e86 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go +++ b/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go @@ -6,12 +6,12 @@ import ( "go.opentelemetry.io/otel/sdk/instrumentation" ) -func instrumentationLibrary(il *commonpb.InstrumentationLibrary) instrumentation.Library { - if il == nil { - return instrumentation.Library{} +func instrumentationScope(is *commonpb.InstrumentationScope) instrumentation.Scope { + if is == nil { + return instrumentation.Scope{} } - return instrumentation.Library{ - Name: il.Name, - Version: il.Version, + return instrumentation.Scope{ + Name: is.Name, + Version: is.Version, } } diff --git a/vendor/github.com/moby/buildkit/util/tracing/transform/span.go 
b/vendor/github.com/moby/buildkit/util/tracing/transform/span.go index f07d0c98e9..9f7924c4a7 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/transform/span.go +++ b/vendor/github.com/moby/buildkit/util/tracing/transform/span.go @@ -31,12 +31,12 @@ func Spans(sdl []*tracepb.ResourceSpans) []tracesdk.ReadOnlySpan { continue } - for _, sdi := range sd.InstrumentationLibrarySpans { + for _, sdi := range sd.ScopeSpans { sda := make([]tracesdk.ReadOnlySpan, len(sdi.Spans)) for i, s := range sdi.Spans { sda[i] = &readOnlySpan{ pb: s, - il: sdi.InstrumentationLibrary, + is: sdi.Scope, resource: sd.Resource, schemaURL: sd.SchemaUrl, } @@ -53,7 +53,7 @@ type readOnlySpan struct { tracesdk.ReadOnlySpan pb *tracepb.Span - il *v11.InstrumentationLibrary + is *v11.InstrumentationScope resource *v1.Resource schemaURL string } @@ -122,8 +122,13 @@ func (s *readOnlySpan) Status() tracesdk.Status { } } +func (s *readOnlySpan) InstrumentationScope() instrumentation.Scope { + return instrumentationScope(s.is) +} + +// Deprecated: use InstrumentationScope. func (s *readOnlySpan) InstrumentationLibrary() instrumentation.Library { - return instrumentationLibrary(s.il) + return s.InstrumentationScope() } // Resource returns information about the entity that produced the span. 
diff --git a/vendor/github.com/moby/buildkit/util/winlayers/applier.go b/vendor/github.com/moby/buildkit/util/winlayers/applier.go index f2b147d674..e415a5e876 100644 --- a/vendor/github.com/moby/buildkit/util/winlayers/applier.go +++ b/vendor/github.com/moby/buildkit/util/winlayers/applier.go @@ -37,6 +37,12 @@ type winApplier struct { } func (s *winApplier) Apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { + // HACK:, containerd doesn't know about vnd.docker.image.rootfs.diff.tar.zstd, but that + // media type is compatible w/ the oci type, so just lie and say it's the oci type + if desc.MediaType == images.MediaTypeDockerSchema2Layer+".zstd" { + desc.MediaType = ocispecs.MediaTypeImageLayerZstd + } + if !hasWindowsLayerMode(ctx) { return s.apply(ctx, desc, mounts, opts...) } diff --git a/vendor/github.com/moby/buildkit/util/winlayers/differ.go b/vendor/github.com/moby/buildkit/util/winlayers/differ.go index fe2b1c2161..effe0c16cb 100644 --- a/vendor/github.com/moby/buildkit/util/winlayers/differ.go +++ b/vendor/github.com/moby/buildkit/util/winlayers/differ.go @@ -15,12 +15,10 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" + log "github.com/moby/buildkit/util/bklog" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - log "github.com/moby/buildkit/util/bklog" ) const ( @@ -109,7 +107,7 @@ func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opt if err != nil { return errors.Wrap(err, "failed to get compressed stream") } - w, discard, done := makeWindowsLayer(io.MultiWriter(compressed, dgstr.Hash())) + w, discard, done := makeWindowsLayer(ctx, io.MultiWriter(compressed, dgstr.Hash())) err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot) if err != nil { 
discard(err) @@ -125,7 +123,7 @@ func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opt } config.Labels["containerd.io/uncompressed"] = dgstr.Digest().String() } else { - w, discard, done := makeWindowsLayer(cw) + w, discard, done := makeWindowsLayer(ctx, cw) if err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot); err != nil { discard(err) return errors.Wrap(err, "failed to write diff") @@ -203,7 +201,7 @@ func addSecurityDescriptor(h *tar.Header) { } } -func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { +func makeWindowsLayer(ctx context.Context, w io.Writer) (io.Writer, func(error), chan error) { pr, pw := io.Pipe() done := make(chan error) @@ -259,7 +257,7 @@ func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { return tarWriter.Close() }() if err != nil { - logrus.Errorf("makeWindowsLayer %+v", err) + log.G(ctx).Errorf("makeWindowsLayer %+v", err) } pw.CloseWithError(err) done <- err diff --git a/vendor/github.com/moby/buildkit/version/ua.go b/vendor/github.com/moby/buildkit/version/ua.go new file mode 100644 index 0000000000..01cfe67cd0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/version/ua.go @@ -0,0 +1,49 @@ +package version + +import ( + "fmt" + "regexp" + "strings" + "sync" +) + +var ( + reRelease *regexp.Regexp + reDev *regexp.Regexp + reOnce sync.Once + uapCbs map[string]func() string +) + +func UserAgent() string { + uaVersion := defaultVersion + + reOnce.Do(func() { + reRelease = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+$`) + reDev = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+`) + }) + + if matches := reRelease.FindAllStringSubmatch(Version, 1); len(matches) > 0 { + uaVersion = matches[0][1] + } else if matches := reDev.FindAllStringSubmatch(Version, 1); len(matches) > 0 { + uaVersion = matches[0][1] + "-dev" + } + + res := &strings.Builder{} + fmt.Fprintf(res, "buildkit/%s", uaVersion) + for pname, pver := range uapCbs { + fmt.Fprintf(res, " %s/%s", pname, pver()) + 
} + + return res.String() +} + +// SetUserAgentProduct sets a callback to get the version of a product to be +// included in the User-Agent header. The callback is called every time the +// User-Agent header is generated. Caller must ensure that the callback is +// cached if it is expensive to compute. +func SetUserAgentProduct(name string, cb func() (version string)) { + if uapCbs == nil { + uapCbs = make(map[string]func() string) + } + uapCbs[name] = cb +} diff --git a/vendor/github.com/moby/buildkit/version/version.go b/vendor/github.com/moby/buildkit/version/version.go index 49640f0f86..9cddea63c0 100644 --- a/vendor/github.com/moby/buildkit/version/version.go +++ b/vendor/github.com/moby/buildkit/version/version.go @@ -17,13 +17,8 @@ package version -import ( - "regexp" - "sync" -) - const ( - defaultVersion = "0.0.0+unknown" + defaultVersion = "v0.0.0+unknown" ) var ( @@ -37,26 +32,3 @@ var ( // the program at linking time. Revision = "" ) - -var ( - reRelease *regexp.Regexp - reDev *regexp.Regexp - reOnce sync.Once -) - -func UserAgent() string { - uaVersion := defaultVersion - - reOnce.Do(func() { - reRelease = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+$`) - reDev = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+`) - }) - - if matches := reRelease.FindAllStringSubmatch(Version, 1); len(matches) > 0 { - uaVersion = matches[0][1] - } else if matches := reDev.FindAllStringSubmatch(Version, 1); len(matches) > 0 { - uaVersion = matches[0][1] + "-dev" - } - - return "buildkit/" + uaVersion -} diff --git a/vendor/github.com/moby/buildkit/worker/base/worker.go b/vendor/github.com/moby/buildkit/worker/base/worker.go index 2c3e4defd1..8d402ff2fb 100644 --- a/vendor/github.com/moby/buildkit/worker/base/worker.go +++ b/vendor/github.com/moby/buildkit/worker/base/worker.go @@ -11,7 +11,6 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/gc" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/leases" 
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes/docker" "github.com/docker/docker/pkg/idtools" @@ -21,6 +20,7 @@ import ( "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/executor/resources" "github.com/moby/buildkit/exporter" imageexporter "github.com/moby/buildkit/exporter/containerimage" localexporter "github.com/moby/buildkit/exporter/local" @@ -30,6 +30,7 @@ import ( "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/snapshot/imagerefchecker" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver/mounts" @@ -42,6 +43,7 @@ import ( "github.com/moby/buildkit/source/local" "github.com/moby/buildkit/util/archutil" "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/network" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/progress/controller" @@ -67,17 +69,18 @@ type WorkerOpt struct { NetworkProviders map[pb.NetMode]network.Provider Executor executor.Executor Snapshotter snapshot.Snapshotter - ContentStore content.Store + ContentStore *containerdsnapshot.Store Applier diff.Applier Differ diff.Comparer ImageStore images.Store // optional RegistryHosts docker.RegistryHosts IdentityMapping *idtools.IdentityMapping - LeaseManager leases.Manager + LeaseManager *leaseutil.Manager GarbageCollect func(context.Context) (gc.Stats, error) ParallelismSem *semaphore.Weighted MetadataStore *metadata.Store MountPoolRoot string + ResourceMonitor *resources.Monitor } // Worker is a local worker instance with dedicated snapshotter, cache, and so on. 
@@ -213,14 +216,19 @@ func (w *Worker) Close() error { rerr = multierror.Append(rerr, err) } } + if w.ResourceMonitor != nil { + if err := w.ResourceMonitor.Close(); err != nil { + rerr = multierror.Append(rerr, err) + } + } return rerr } -func (w *Worker) ContentStore() content.Store { +func (w *Worker) ContentStore() *containerdsnapshot.Store { return w.WorkerOpt.ContentStore } -func (w *Worker) LeaseManager() leases.Manager { +func (w *Worker) LeaseManager() *leaseutil.Manager { return w.WorkerOpt.LeaseManager } @@ -354,7 +362,7 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error { return nil } -func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) { +func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) { // is this an registry source? Or an OCI layout source? 
switch opt.ResolverType { case llb.ResolverTypeOCILayout: diff --git a/vendor/github.com/moby/buildkit/worker/cacheresult.go b/vendor/github.com/moby/buildkit/worker/cacheresult.go index a635a53502..50f7c93688 100644 --- a/vendor/github.com/moby/buildkit/worker/cacheresult.go +++ b/vendor/github.com/moby/buildkit/worker/cacheresult.go @@ -95,8 +95,8 @@ func (s *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheRe } return remotes, nil } -func (s *cacheResultStorage) Exists(id string) bool { - ref, err := s.load(context.TODO(), id, true) +func (s *cacheResultStorage) Exists(ctx context.Context, id string) bool { + ref, err := s.load(ctx, id, true) if err != nil { return false } diff --git a/vendor/github.com/moby/buildkit/worker/containerd/containerd.go b/vendor/github.com/moby/buildkit/worker/containerd/containerd.go index a829d45757..e8d948d0e8 100644 --- a/vendor/github.com/moby/buildkit/worker/containerd/containerd.go +++ b/vendor/github.com/moby/buildkit/worker/containerd/containerd.go @@ -10,7 +10,7 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/gc" "github.com/containerd/containerd/leases" - gogoptypes "github.com/gogo/protobuf/types" + ptypes "github.com/containerd/containerd/protobuf/types" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/executor/containerdexecutor" @@ -53,7 +53,7 @@ func newContainerd(root string, client *containerd.Client, snapshotterName, ns s return base.WorkerOpt{}, err } - serverInfo, err := client.IntrospectionService().Server(context.TODO(), &gogoptypes.Empty{}) + serverInfo, err := client.IntrospectionService().Server(context.TODO(), &ptypes.Empty{}) if err != nil { return base.WorkerOpt{}, err } diff --git a/vendor/github.com/moby/buildkit/worker/worker.go b/vendor/github.com/moby/buildkit/worker/worker.go index 2f426e9ead..d62047e9fb 100644 --- a/vendor/github.com/moby/buildkit/worker/worker.go +++ 
b/vendor/github.com/moby/buildkit/worker/worker.go @@ -4,8 +4,6 @@ import ( "context" "io" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/leases" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" @@ -13,7 +11,9 @@ import ( "github.com/moby/buildkit/exporter" "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/session" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/leaseutil" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -30,16 +30,16 @@ type Worker interface { LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) // ResolveOp resolves Vertex.Sys() to Op implementation. ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) - ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) + ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (string, digest.Digest, []byte, error) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) Prune(ctx context.Context, ch chan client.UsageInfo, opt ...client.PruneInfo) error FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) PruneCacheMounts(ctx context.Context, ids []string) error - ContentStore() content.Store + ContentStore() *containerdsnapshot.Store Executor() executor.Executor CacheManager() cache.Manager - LeaseManager() leases.Manager + LeaseManager() *leaseutil.Manager } type Infos interface { diff --git a/vendor/github.com/spdx/tools-golang/convert/chain.go b/vendor/github.com/spdx/tools-golang/convert/chain.go new 
file mode 100644 index 0000000000..ac96733c1b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/convert/chain.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package convert + +import ( + "fmt" + "reflect" + + converter "github.com/anchore/go-struct-converter" + + "github.com/spdx/tools-golang/spdx/common" + "github.com/spdx/tools-golang/spdx/v2/v2_1" + "github.com/spdx/tools-golang/spdx/v2/v2_2" + "github.com/spdx/tools-golang/spdx/v2/v2_3" +) + +var DocumentChain = converter.NewChain( + v2_1.Document{}, + v2_2.Document{}, + v2_3.Document{}, +) + +// Document converts from one document to another document +// For example, converting a document to the latest version could be done like: +// +// sourceDoc := // e.g. a v2_2.Document from somewhere +// var targetDoc spdx.Document // this can be any document version +// err := convert.Document(sourceDoc, &targetDoc) // the target must be passed as a pointer +func Document(from common.AnyDocument, to common.AnyDocument) error { + if !IsPtr(to) { + return fmt.Errorf("struct to convert to must be a pointer") + } + from = FromPtr(from) + if reflect.TypeOf(from) == reflect.TypeOf(FromPtr(to)) { + reflect.ValueOf(to).Elem().Set(reflect.ValueOf(from)) + return nil + } + return DocumentChain.Convert(from, to) +} diff --git a/vendor/github.com/spdx/tools-golang/convert/struct.go b/vendor/github.com/spdx/tools-golang/convert/struct.go new file mode 100644 index 0000000000..7223dbdbd4 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/convert/struct.go @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package convert + +import ( + "fmt" + "reflect" + + "github.com/spdx/tools-golang/spdx/common" +) + +// FromPtr accepts a document or a document pointer and returns the direct struct reference +func FromPtr(doc common.AnyDocument) common.AnyDocument { + value := reflect.ValueOf(doc) + for value.Type().Kind() == reflect.Ptr { + value = value.Elem() + } + 
return value.Interface() +} + +func IsPtr(obj common.AnyDocument) bool { + t := reflect.TypeOf(obj) + if t.Kind() == reflect.Interface { + t = t.Elem() + } + return t.Kind() == reflect.Ptr +} + +func Describe(o interface{}) string { + value := reflect.ValueOf(o) + typ := value.Type() + prefix := "" + for typ.Kind() == reflect.Ptr { + prefix += "*" + value = value.Elem() + typ = value.Type() + } + str := limit(fmt.Sprintf("%+v", value.Interface()), 300) + name := fmt.Sprintf("%s.%s%s", typ.PkgPath(), prefix, typ.Name()) + return fmt.Sprintf("%s: %s", name, str) +} + +func limit(text string, length int) string { + if length <= 0 || len(text) <= length+3 { + return text + } + r := []rune(text) + r = r[:length] + return string(r) + "..." +} diff --git a/vendor/github.com/spdx/tools-golang/json/parser.go b/vendor/github.com/spdx/tools-golang/json/parser.go deleted file mode 100644 index ee7915de0f..0000000000 --- a/vendor/github.com/spdx/tools-golang/json/parser.go +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later - -package spdx_json - -import ( - "bytes" - "encoding/json" - "io" - - "github.com/spdx/tools-golang/spdx/v2_2" - "github.com/spdx/tools-golang/spdx/v2_3" -) - -// Load2_2 takes in an io.Reader and returns an SPDX document. -func Load2_2(content io.Reader) (*v2_2.Document, error) { - // convert io.Reader to a slice of bytes and call the parser - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(content) - if err != nil { - return nil, err - } - - var doc v2_2.Document - err = json.Unmarshal(buf.Bytes(), &doc) - if err != nil { - return nil, err - } - - return &doc, nil -} - -// Load2_3 takes in an io.Reader and returns an SPDX document. 
-func Load2_3(content io.Reader) (*v2_3.Document, error) { - // convert io.Reader to a slice of bytes and call the parser - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(content) - if err != nil { - return nil, err - } - - var doc v2_3.Document - err = json.Unmarshal(buf.Bytes(), &doc) - if err != nil { - return nil, err - } - - return &doc, nil -} diff --git a/vendor/github.com/spdx/tools-golang/json/reader.go b/vendor/github.com/spdx/tools-golang/json/reader.go new file mode 100644 index 0000000000..f1a0b989af --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/json/reader.go @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + + "github.com/spdx/tools-golang/convert" + "github.com/spdx/tools-golang/spdx" + "github.com/spdx/tools-golang/spdx/common" + "github.com/spdx/tools-golang/spdx/v2/v2_1" + "github.com/spdx/tools-golang/spdx/v2/v2_2" + "github.com/spdx/tools-golang/spdx/v2/v2_3" +) + +// Read takes an io.Reader and returns a fully-parsed current model SPDX Document +// or an error if any error is encountered. 
+func Read(content io.Reader) (*spdx.Document, error) { + doc := spdx.Document{} + err := ReadInto(content, &doc) + return &doc, err +} + +// ReadInto takes an io.Reader, reads in the SPDX document at the version provided +// and converts to the doc version +func ReadInto(content io.Reader, doc common.AnyDocument) error { + if !convert.IsPtr(doc) { + return fmt.Errorf("doc to read into must be a pointer") + } + + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(content) + if err != nil { + return err + } + + var data interface{} + err = json.Unmarshal(buf.Bytes(), &data) + if err != nil { + return err + } + + val, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("not a valid SPDX JSON document") + } + + version, ok := val["spdxVersion"] + if !ok { + return fmt.Errorf("JSON document does not contain spdxVersion field") + } + + switch version { + case v2_1.Version: + var doc v2_1.Document + err = json.Unmarshal(buf.Bytes(), &doc) + if err != nil { + return err + } + data = doc + case v2_2.Version: + var doc v2_2.Document + err = json.Unmarshal(buf.Bytes(), &doc) + if err != nil { + return err + } + data = doc + case v2_3.Version: + var doc v2_3.Document + err = json.Unmarshal(buf.Bytes(), &doc) + if err != nil { + return err + } + data = doc + default: + return fmt.Errorf("unsupported SDPX version: %s", version) + } + + return convert.Document(data, doc) +} diff --git a/vendor/github.com/spdx/tools-golang/json/writer.go b/vendor/github.com/spdx/tools-golang/json/writer.go index 8f2b94dc60..a944dccb9e 100644 --- a/vendor/github.com/spdx/tools-golang/json/writer.go +++ b/vendor/github.com/spdx/tools-golang/json/writer.go @@ -1,41 +1,33 @@ // SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later -package spdx_json +package json import ( "encoding/json" - "github.com/spdx/tools-golang/spdx/v2_3" "io" - "github.com/spdx/tools-golang/spdx/v2_2" + "github.com/spdx/tools-golang/spdx/common" ) -// Save2_2 takes an SPDX Document (version 2.2) and an 
io.Writer, and writes the document to the writer in JSON format. -func Save2_2(doc *v2_2.Document, w io.Writer) error { - buf, err := json.Marshal(doc) - if err != nil { - return err - } +type WriteOption func(*json.Encoder) - _, err = w.Write(buf) - if err != nil { - return err +func Indent(indent string) WriteOption { + return func(e *json.Encoder) { + e.SetIndent("", indent) } - - return nil } -// Save2_3 takes an SPDX Document (version 2.2) and an io.Writer, and writes the document to the writer in JSON format. -func Save2_3(doc *v2_3.Document, w io.Writer) error { - buf, err := json.Marshal(doc) - if err != nil { - return err +func EscapeHTML(escape bool) WriteOption { + return func(e *json.Encoder) { + e.SetEscapeHTML(escape) } - - _, err = w.Write(buf) - if err != nil { - return err - } - - return nil +} + +// Write takes an SPDX Document and an io.Writer, and writes the document to the writer in JSON format. +func Write(doc common.AnyDocument, w io.Writer, opts ...WriteOption) error { + e := json.NewEncoder(w) + for _, opt := range opts { + opt(e) + } + return e.Encode(doc) } diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/types.go b/vendor/github.com/spdx/tools-golang/spdx/common/types.go new file mode 100644 index 0000000000..059d62f22b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/types.go @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +// AnyDocument a placeholder for allowing any SPDX document to be used in function args +type AnyDocument interface{} diff --git a/vendor/github.com/spdx/tools-golang/spdx/model.go b/vendor/github.com/spdx/tools-golang/spdx/model.go new file mode 100644 index 0000000000..e91856b0e5 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/model.go @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +// Package spdx contains references to the latest spdx version +package spdx + +import ( + 
"github.com/spdx/tools-golang/spdx/v2/common" + latest "github.com/spdx/tools-golang/spdx/v2/v2_3" +) + +const ( + Version = latest.Version + DataLicense = latest.DataLicense +) + +type ( + Annotation = latest.Annotation + ArtifactOfProject = latest.ArtifactOfProject + CreationInfo = latest.CreationInfo + Document = latest.Document + ExternalDocumentRef = latest.ExternalDocumentRef + File = latest.File + OtherLicense = latest.OtherLicense + Package = latest.Package + PackageExternalReference = latest.PackageExternalReference + Relationship = latest.Relationship + Review = latest.Review + Snippet = latest.Snippet +) + +type ( + Annotator = common.Annotator + Checksum = common.Checksum + ChecksumAlgorithm = common.ChecksumAlgorithm + Creator = common.Creator + DocElementID = common.DocElementID + ElementID = common.ElementID + Originator = common.Originator + PackageVerificationCode = common.PackageVerificationCode + SnippetRange = common.SnippetRange + SnippetRangePointer = common.SnippetRangePointer + Supplier = common.Supplier +) + +const ( + SHA224 = common.SHA224 + SHA1 = common.SHA1 + SHA256 = common.SHA256 + SHA384 = common.SHA384 + SHA512 = common.SHA512 + MD2 = common.MD2 + MD4 = common.MD4 + MD5 = common.MD5 + MD6 = common.MD6 + SHA3_256 = common.SHA3_256 + SHA3_384 = common.SHA3_384 + SHA3_512 = common.SHA3_512 + BLAKE2b_256 = common.BLAKE2b_256 + BLAKE2b_384 = common.BLAKE2b_384 + BLAKE2b_512 = common.BLAKE2b_512 + BLAKE3 = common.BLAKE3 + ADLER32 = common.ADLER32 +) + +const ( + // F.2 Security types + CategorySecurity = common.CategorySecurity + SecurityCPE23Type = common.TypeSecurityCPE23Type + SecurityCPE22Type = common.TypeSecurityCPE22Type + SecurityAdvisory = common.TypeSecurityAdvisory + SecurityFix = common.TypeSecurityFix + SecurityUrl = common.TypeSecurityUrl + SecuritySwid = common.TypeSecuritySwid + + // F.3 Package-Manager types + CategoryPackageManager = common.CategoryPackageManager + PackageManagerMavenCentral = 
common.TypePackageManagerMavenCentral + PackageManagerNpm = common.TypePackageManagerNpm + PackageManagerNuGet = common.TypePackageManagerNuGet + PackageManagerBower = common.TypePackageManagerBower + PackageManagerPURL = common.TypePackageManagerPURL + + // F.4 Persistent-Id types + CategoryPersistentId = common.CategoryPersistentId + TypePersistentIdSwh = common.TypePersistentIdSwh + TypePersistentIdGitoid = common.TypePersistentIdGitoid + + // 11.1 Relationship field types + RelationshipDescribes = common.TypeRelationshipDescribe + RelationshipDescribedBy = common.TypeRelationshipDescribeBy + RelationshipContains = common.TypeRelationshipContains + RelationshipContainedBy = common.TypeRelationshipContainedBy + RelationshipDependsOn = common.TypeRelationshipDependsOn + RelationshipDependencyOf = common.TypeRelationshipDependencyOf + RelationshipBuildDependencyOf = common.TypeRelationshipBuildDependencyOf + RelationshipDevDependencyOf = common.TypeRelationshipDevDependencyOf + RelationshipOptionalDependencyOf = common.TypeRelationshipOptionalDependencyOf + RelationshipProvidedDependencyOf = common.TypeRelationshipProvidedDependencyOf + RelationshipTestDependencyOf = common.TypeRelationshipTestDependencyOf + RelationshipRuntimeDependencyOf = common.TypeRelationshipRuntimeDependencyOf + RelationshipExampleOf = common.TypeRelationshipExampleOf + RelationshipGenerates = common.TypeRelationshipGenerates + RelationshipGeneratedFrom = common.TypeRelationshipGeneratedFrom + RelationshipAncestorOf = common.TypeRelationshipAncestorOf + RelationshipDescendantOf = common.TypeRelationshipDescendantOf + RelationshipVariantOf = common.TypeRelationshipVariantOf + RelationshipDistributionArtifact = common.TypeRelationshipDistributionArtifact + RelationshipPatchFor = common.TypeRelationshipPatchFor + RelationshipPatchApplied = common.TypeRelationshipPatchApplied + RelationshipCopyOf = common.TypeRelationshipCopyOf + RelationshipFileAdded = common.TypeRelationshipFileAdded + 
RelationshipFileDeleted = common.TypeRelationshipFileDeleted + RelationshipFileModified = common.TypeRelationshipFileModified + RelationshipExpandedFromArchive = common.TypeRelationshipExpandedFromArchive + RelationshipDynamicLink = common.TypeRelationshipDynamicLink + RelationshipStaticLink = common.TypeRelationshipStaticLink + RelationshipDataFileOf = common.TypeRelationshipDataFileOf + RelationshipTestCaseOf = common.TypeRelationshipTestCaseOf + RelationshipBuildToolOf = common.TypeRelationshipBuildToolOf + RelationshipDevToolOf = common.TypeRelationshipDevToolOf + RelationshipTestOf = common.TypeRelationshipTestOf + RelationshipTestToolOf = common.TypeRelationshipTestToolOf + RelationshipDocumentationOf = common.TypeRelationshipDocumentationOf + RelationshipOptionalComponentOf = common.TypeRelationshipOptionalComponentOf + RelationshipMetafileOf = common.TypeRelationshipMetafileOf + RelationshipPackageOf = common.TypeRelationshipPackageOf + RelationshipAmends = common.TypeRelationshipAmends + RelationshipPrerequisiteFor = common.TypeRelationshipPrerequisiteFor + RelationshipHasPrerequisite = common.TypeRelationshipHasPrerequisite + RelationshipRequirementDescriptionFor = common.TypeRelationshipRequirementDescriptionFor + RelationshipSpecificationFor = common.TypeRelationshipSpecificationFor + RelationshipOther = common.TypeRelationshipOther +) diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/annotation.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/common/annotation.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/annotation.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/checksum.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/checksum.go similarity index 91% rename from vendor/github.com/spdx/tools-golang/spdx/common/checksum.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/checksum.go index 
aa2ae52ff1..d4969ef846 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/common/checksum.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/common/checksum.go @@ -5,7 +5,7 @@ package common // ChecksumAlgorithm represents the algorithm used to generate the file checksum in the Checksum struct. type ChecksumAlgorithm string -// The checksum algorithms mentioned in the spdxv2.2.0 https://spdx.github.io/spdx-spec/4-file-information/#44-file-checksum +// The checksum algorithms mentioned in the spec https://spdx.github.io/spdx-spec/4-file-information/#44-file-checksum const ( SHA224 ChecksumAlgorithm = "SHA224" SHA1 ChecksumAlgorithm = "SHA1" diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/creation_info.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/common/creation_info.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/creation_info.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/external.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/external.go similarity index 93% rename from vendor/github.com/spdx/tools-golang/spdx/common/external.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/external.go index 59c3f0f03f..8344ac6162 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/common/external.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/common/external.go @@ -4,7 +4,9 @@ package common // Constants for various string types const ( + // F.2 Security types + CategorySecurity string = "SECURITY" TypeSecurityCPE23Type string = "cpe23Type" TypeSecurityCPE22Type string = "cpe22Type" TypeSecurityAdvisory string = "advisory" @@ -13,11 +15,16 @@ const ( TypeSecuritySwid string = "swid" // F.3 Package-Manager types + CategoryPackageManager string = "PACKAGE-MANAGER" TypePackageManagerMavenCentral string = "maven-central" TypePackageManagerNpm string = "npm" TypePackageManagerNuGet string = "nuget" 
TypePackageManagerBower string = "bower" TypePackageManagerPURL string = "purl" + // F.4 Persistent-Id types + CategoryPersistentId string = "PERSISTENT-ID" + TypePersistentIdSwh string = "swh" + TypePersistentIdGitoid string = "gitoid" // 11.1 Relationship field types TypeRelationshipDescribe string = "DESCRIBES" diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/identifier.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/identifier.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/common/identifier.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/identifier.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/common/package.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2/common/snippet.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/common/snippet.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/common/snippet.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/annotation.go new file mode 100644 index 0000000000..c80f64cfde --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/annotation.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// Annotation is an Annotation section of an SPDX Document for version 2.1 of the spec. 
+type Annotation struct { + // 8.1: Annotator + // Cardinality: conditional (mandatory, one) if there is an Annotation + Annotator common.Annotator `json:"annotator"` + + // 8.2: Annotation Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationDate string `json:"annotationDate"` + + // 8.3: Annotation Type: "REVIEW" or "OTHER" + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationType string `json:"annotationType"` + + // 8.4: SPDX Identifier Reference + // Cardinality: conditional (mandatory, one) if there is an Annotation + // This field is not used in hierarchical data formats where the referenced element is clear, such as JSON or YAML. + AnnotationSPDXIdentifier common.DocElementID `json:"-"` + + // 8.5: Annotation Comment + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationComment string `json:"comment"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/creation_info.go new file mode 100644 index 0000000000..c75e8ea810 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/creation_info.go @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// CreationInfo is a Document Creation Information section of an +// SPDX Document for version 2.1 of the spec. 
+type CreationInfo struct { + // 2.7: License List Version + // Cardinality: optional, one + LicenseListVersion string `json:"licenseListVersion,omitempty"` + + // 2.8: Creators: may have multiple keys for Person, Organization + // and/or Tool + // Cardinality: mandatory, one or many + Creators []common.Creator `json:"creators"` + + // 2.9: Created: data format YYYY-MM-DDThh:mm:ssZ + // Cardinality: mandatory, one + Created string `json:"created"` + + // 2.10: Creator Comment + // Cardinality: optional, one + CreatorComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/document.go similarity index 69% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/document.go index 31ac08b6c7..60a27c44d8 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/document.go @@ -1,12 +1,19 @@ // Package spdx contains the struct definition for an SPDX Document // and its constituent parts. // SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later -package v2_2 +package v2_1 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/anchore/go-struct-converter" + + "github.com/spdx/tools-golang/spdx/v2/common" +) + +const Version = "SPDX-2.1" +const DataLicense = "CC0-1.0" // ExternalDocumentRef is a reference to an external SPDX document -// as defined in section 6.6 for version 2.2 of the spec. +// as defined in section 2.6 for version 2.1 of the spec. type ExternalDocumentRef struct { // DocumentRefID is the ID string defined in the start of the // reference. It should _not_ contain the "DocumentRef-" part @@ -20,35 +27,35 @@ type ExternalDocumentRef struct { Checksum common.Checksum `json:"checksum"` } -// Document is an SPDX Document for version 2.2 of the spec. 
-// See https://spdx.github.io/spdx-spec/v2-draft/ (DRAFT) +// Document is an SPDX Document for version 2.1 of the spec. +// See https://spdx.org/sites/cpstandard/files/pages/files/spdxversion2.1.pdf type Document struct { - // 6.1: SPDX Version; should be in the format "SPDX-2.2" + // 2.1: SPDX Version; should be in the format "SPDX-2.1" // Cardinality: mandatory, one SPDXVersion string `json:"spdxVersion"` - // 6.2: Data License; should be "CC0-1.0" + // 2.2: Data License; should be "CC0-1.0" // Cardinality: mandatory, one DataLicense string `json:"dataLicense"` - // 6.3: SPDX Identifier; should be "DOCUMENT" to represent + // 2.3: SPDX Identifier; should be "DOCUMENT" to represent // mandatory identifier of SPDXRef-DOCUMENT // Cardinality: mandatory, one SPDXIdentifier common.ElementID `json:"SPDXID"` - // 6.4: Document Name + // 2.4: Document Name // Cardinality: mandatory, one DocumentName string `json:"name"` - // 6.5: Document Namespace + // 2.5: Document Namespace // Cardinality: mandatory, one DocumentNamespace string `json:"documentNamespace"` - // 6.6: External Document References + // 2.6: External Document References // Cardinality: optional, one or many ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` - // 6.11: Document Comment + // 2.11: Document Comment // Cardinality: optional, one DocumentComment string `json:"comment,omitempty"` @@ -63,3 +70,10 @@ type Document struct { // DEPRECATED in version 2.0 of spec Reviews []*Review `json:"-"` } + +func (d *Document) ConvertFrom(_ interface{}) error { + d.SPDXVersion = Version + return nil +} + +var _ converter.ConvertFrom = (*Document)(nil) diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/file.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/file.go new file mode 100644 index 0000000000..50bdcf1a22 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/file.go @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: Apache-2.0 OR 
GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// File is a File section of an SPDX Document for version 2.1 of the spec. +type File struct { + // 4.1: File Name + // Cardinality: mandatory, one + FileName string `json:"fileName"` + + // 4.2: File SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + FileSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 4.3: File Types + // Cardinality: optional, multiple + FileTypes []string `json:"fileTypes,omitempty"` + + // 4.4: File Checksum: may have keys for SHA1, SHA256 and/or MD5 + // Cardinality: mandatory, one SHA1, others may be optionally provided + Checksums []common.Checksum `json:"checksums"` + + // 4.5: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + LicenseConcluded string `json:"licenseConcluded"` + + // 4.6: License Information in File: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one or many + LicenseInfoInFiles []string `json:"licenseInfoInFiles"` + + // 4.7: Comments on License + // Cardinality: optional, one + LicenseComments string `json:"licenseComments,omitempty"` + + // 4.8: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + FileCopyrightText string `json:"copyrightText"` + + // DEPRECATED in version 2.1 of spec + // 4.9-4.11: Artifact of Project variables (defined below) + // Cardinality: optional, one or many + ArtifactOfProjects []*ArtifactOfProject `json:"-"` + + // 4.12: File Comment + // Cardinality: optional, one + FileComment string `json:"comment,omitempty"` + + // 4.13: File Notice + // Cardinality: optional, one + FileNotice string `json:"noticeText,omitempty"` + + // 4.14: File Contributor + // Cardinality: optional, one or many + FileContributors []string `json:"fileContributors,omitempty"` + + // DEPRECATED in version 2.0 of spec + // 4.15: File Dependencies + // Cardinality: 
optional, one or many + FileDependencies []string `json:"-"` + + // Snippets contained in this File + // Note that Snippets could be defined in a different Document! However, + // the only ones that _THIS_ document can contain are the ones that are + // defined here -- so this should just be an ElementID. + Snippets map[common.ElementID]*Snippet `json:"-"` + + Annotations []Annotation `json:"annotations,omitempty"` +} + +// ArtifactOfProject is a DEPRECATED collection of data regarding +// a Package, as defined in sections 4.9-4.11 in version 2.1 of the spec. +type ArtifactOfProject struct { + + // DEPRECATED in version 2.1 of spec + // 4.9: Artifact of Project Name + // Cardinality: conditional, required if present, one per AOP + Name string + + // DEPRECATED in version 2.1 of spec + // 4.10: Artifact of Project Homepage: URL or "UNKNOWN" + // Cardinality: optional, one per AOP + HomePage string + + // DEPRECATED in version 2.1 of spec + // 4.11: Artifact of Project Uniform Resource Identifier + // Cardinality: optional, one per AOP + URI string +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/other_license.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/other_license.go new file mode 100644 index 0000000000..6ae09feb6f --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/other_license.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +// OtherLicense is an Other License Information section of an +// SPDX Document for version 2.1 of the spec. 
+type OtherLicense struct { + // 6.1: License Identifier: "LicenseRef-[idstring]" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseIdentifier string `json:"licenseId"` + + // 6.2: Extracted Text + // Cardinality: conditional (mandatory, one) if there is a + // License Identifier assigned + ExtractedText string `json:"extractedText"` + + // 6.3: License Name: single line of text or "NOASSERTION" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseName string `json:"name,omitempty"` + + // 6.4: License Cross Reference + // Cardinality: conditional (optional, one or many) if license + // is not on SPDX License List + LicenseCrossReferences []string `json:"seeAlsos,omitempty"` + + // 6.5: License Comment + // Cardinality: optional, one + LicenseComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/package.go new file mode 100644 index 0000000000..9800c2c23b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/package.go @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// Package is a Package section of an SPDX Document for version 2.1 of the spec. 
+type Package struct { + // 3.1: Package Name + // Cardinality: mandatory, one + PackageName string `json:"name"` + + // 3.2: Package SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + PackageSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 3.3: Package Version + // Cardinality: optional, one + PackageVersion string `json:"versionInfo,omitempty"` + + // 3.4: Package File Name + // Cardinality: optional, one + PackageFileName string `json:"packageFileName,omitempty"` + + // 3.5: Package Supplier: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageSupplier *common.Supplier `json:"supplier,omitempty"` + + // 3.6: Package Originator: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageOriginator *common.Originator `json:"originator,omitempty"` + + // 3.7: Package Download Location + // Cardinality: mandatory, one + PackageDownloadLocation string `json:"downloadLocation"` + + // 3.8: FilesAnalyzed + // Cardinality: optional, one; default value is "true" if omitted + FilesAnalyzed bool `json:"filesAnalyzed,omitempty"` + // NOT PART OF SPEC: did FilesAnalyzed tag appear? 
+ IsFilesAnalyzedTagPresent bool `json:"-"` + + // 3.9: Package Verification Code + PackageVerificationCode common.PackageVerificationCode `json:"packageVerificationCode,omitempty"` + + // 3.10: Package Checksum: may have keys for SHA1, SHA256 and/or MD5 + // Cardinality: optional, one or many + PackageChecksums []common.Checksum `json:"checksums,omitempty"` + + // 3.11: Package Home Page + // Cardinality: optional, one + PackageHomePage string `json:"homepage,omitempty"` + + // 3.12: Source Information + // Cardinality: optional, one + PackageSourceInfo string `json:"sourceInfo,omitempty"` + + // 3.13: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageLicenseConcluded string `json:"licenseConcluded"` + + // 3.14: All Licenses Info from Files: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one or many if filesAnalyzed is true / omitted; + // zero (must be omitted) if filesAnalyzed is false + PackageLicenseInfoFromFiles []string `json:"licenseInfoFromFiles"` + + // 3.15: Declared License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageLicenseDeclared string `json:"licenseDeclared"` + + // 3.16: Comments on License + // Cardinality: optional, one + PackageLicenseComments string `json:"licenseComments,omitempty"` + + // 3.17: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageCopyrightText string `json:"copyrightText"` + + // 3.18: Package Summary Description + // Cardinality: optional, one + PackageSummary string `json:"summary,omitempty"` + + // 3.19: Package Detailed Description + // Cardinality: optional, one + PackageDescription string `json:"description,omitempty"` + + // 3.20: Package Comment + // Cardinality: optional, one + PackageComment string `json:"comment,omitempty"` + + // 3.21: Package External Reference + // Cardinality: optional, one or many + 
PackageExternalReferences []*PackageExternalReference `json:"externalRefs,omitempty"` + + // Files contained in this Package + Files []*File `json:"files,omitempty"` + + Annotations []Annotation `json:"annotations,omitempty"` +} + +// PackageExternalReference is an External Reference to additional info +// about a Package, as defined in section 3.21 in version 2.1 of the spec. +type PackageExternalReference struct { + // category is "SECURITY", "PACKAGE-MANAGER" or "OTHER" + Category string `json:"referenceCategory"` + + // type is an [idstring] as defined in Appendix VI; + // called RefType here due to "type" being a Golang keyword + RefType string `json:"referenceType"` + + // locator is a unique string to access the package-specific + // info, metadata or content within the target location + Locator string `json:"referenceLocator"` + + // 3.22: Package External Reference Comment + // Cardinality: conditional (optional, one) for each External Reference + ExternalRefComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/relationship.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/relationship.go new file mode 100644 index 0000000000..827927aebd --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/relationship.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// Relationship is a Relationship section of an SPDX Document for +// version 2.1 of the spec. 
+type Relationship struct { + + // 7.1: Relationship + // Cardinality: optional, one or more; one per Relationship + // one mandatory for SPDX Document with multiple packages + // RefA and RefB are first and second item + // Relationship is type from 7.1.1 + RefA common.DocElementID `json:"spdxElementId"` + RefB common.DocElementID `json:"relatedSpdxElement"` + Relationship string `json:"relationshipType"` + + // 7.2: Relationship Comment + // Cardinality: optional, one + RelationshipComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/review.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/review.go new file mode 100644 index 0000000000..8d70d00e4b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/review.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +// Review is a Review section of an SPDX Document for version 2.1 of the spec. +// DEPRECATED in version 2.0 of spec; retained here for compatibility. 
+type Review struct { + + // DEPRECATED in version 2.0 of spec + // 9.1: Reviewer + // Cardinality: optional, one + Reviewer string + // including AnnotatorType: one of "Person", "Organization" or "Tool" + ReviewerType string + + // DEPRECATED in version 2.0 of spec + // 9.2: Review Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is a Reviewer + ReviewDate string + + // DEPRECATED in version 2.0 of spec + // 9.3: Review Comment + // Cardinality: optional, one + ReviewComment string +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/snippet.go new file mode 100644 index 0000000000..9b94fd8d81 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/snippet.go @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_1 + +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) + +// Snippet is a Snippet section of an SPDX Document for version 2.1 of the spec. 
+type Snippet struct { + + // 5.1: Snippet SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + SnippetSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 5.2: Snippet from File SPDX Identifier + // Cardinality: mandatory, one + SnippetFromFileSPDXIdentifier common.ElementID `json:"snippetFromFile"` + + // Ranges denotes the start/end byte offsets or line numbers that the snippet is relevant to + Ranges []common.SnippetRange `json:"ranges"` + + // 5.5: Snippet Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + SnippetLicenseConcluded string `json:"licenseConcluded"` + + // 5.6: License Information in Snippet: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one or many + LicenseInfoInSnippet []string `json:"licenseInfoInSnippets,omitempty"` + + // 5.7: Snippet Comments on License + // Cardinality: optional, one + SnippetLicenseComments string `json:"licenseComments,omitempty"` + + // 5.8: Snippet Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + SnippetCopyrightText string `json:"copyrightText"` + + // 5.9: Snippet Comment + // Cardinality: optional, one + SnippetComment string `json:"comment,omitempty"` + + // 5.10: Snippet Name + // Cardinality: optional, one + SnippetName string `json:"name,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/annotation.go similarity index 94% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/annotation.go index 35eddc617e..3d76d4b71f 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/annotation.go @@ -2,7 +2,9 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) 
// Annotation is an Annotation section of an SPDX Document for version 2.2 of the spec. type Annotation struct { diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/creation_info.go similarity index 84% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/creation_info.go index 70e611f79b..39082e7acf 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/creation_info.go @@ -2,14 +2,16 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) // CreationInfo is a Document Creation Information section of an // SPDX Document for version 2.2 of the spec. type CreationInfo struct { // 6.7: License List Version // Cardinality: optional, one - LicenseListVersion string `json:"licenseListVersion"` + LicenseListVersion string `json:"licenseListVersion,omitempty"` // 6.8: Creators: may have multiple keys for Person, Organization // and/or Tool diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go new file mode 100644 index 0000000000..d94f5b066c --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go @@ -0,0 +1,150 @@ +// Package spdx contains the struct definition for an SPDX Document +// and its constituent parts. +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +package v2_2 + +import ( + "encoding/json" + "fmt" + + converter "github.com/anchore/go-struct-converter" + + "github.com/spdx/tools-golang/spdx/v2/common" +) + +const Version = "SPDX-2.2" +const DataLicense = "CC0-1.0" + +// ExternalDocumentRef is a reference to an external SPDX document +// as defined in section 6.6 for version 2.2 of the spec. 
+type ExternalDocumentRef struct { + // DocumentRefID is the ID string defined in the start of the + // reference. It should _not_ contain the "DocumentRef-" part + // of the mandatory ID string. + DocumentRefID string `json:"externalDocumentId"` + + // URI is the URI defined for the external document + URI string `json:"spdxDocument"` + + // Checksum is the actual hash data + Checksum common.Checksum `json:"checksum"` +} + +// Document is an SPDX Document for version 2.2 of the spec. +// See https://spdx.github.io/spdx-spec/v2-draft/ (DRAFT) +type Document struct { + // 6.1: SPDX Version; should be in the format "SPDX-2.2" + // Cardinality: mandatory, one + SPDXVersion string `json:"spdxVersion"` + + // 6.2: Data License; should be "CC0-1.0" + // Cardinality: mandatory, one + DataLicense string `json:"dataLicense"` + + // 6.3: SPDX Identifier; should be "DOCUMENT" to represent + // mandatory identifier of SPDXRef-DOCUMENT + // Cardinality: mandatory, one + SPDXIdentifier common.ElementID `json:"SPDXID"` + + // 6.4: Document Name + // Cardinality: mandatory, one + DocumentName string `json:"name"` + + // 6.5: Document Namespace + // Cardinality: mandatory, one + DocumentNamespace string `json:"documentNamespace"` + + // 6.6: External Document References + // Cardinality: optional, one or many + ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` + + // 6.11: Document Comment + // Cardinality: optional, one + DocumentComment string `json:"comment,omitempty"` + + CreationInfo *CreationInfo `json:"creationInfo"` + Packages []*Package `json:"packages,omitempty"` + Files []*File `json:"files,omitempty"` + OtherLicenses []*OtherLicense `json:"hasExtractedLicensingInfos,omitempty"` + Relationships []*Relationship `json:"relationships,omitempty"` + Annotations []*Annotation `json:"annotations,omitempty"` + Snippets []Snippet `json:"snippets,omitempty"` + + // DEPRECATED in version 2.0 of spec + Reviews []*Review `json:"-"` +} + +func (d 
*Document) ConvertFrom(_ interface{}) error { + d.SPDXVersion = Version + return nil +} + +var _ converter.ConvertFrom = (*Document)(nil) + +func (d *Document) UnmarshalJSON(b []byte) error { + type doc Document + type extras struct { + DocumentDescribes []common.DocElementID `json:"documentDescribes"` + } + + var d2 doc + if err := json.Unmarshal(b, &d2); err != nil { + return err + } + + var e extras + if err := json.Unmarshal(b, &e); err != nil { + return err + } + + *d = Document(d2) + + relationshipExists := map[string]bool{} + serializeRel := func(r *Relationship) string { + return fmt.Sprintf("%v-%v->%v", common.RenderDocElementID(r.RefA), r.Relationship, common.RenderDocElementID(r.RefB)) + } + + // index current list of relationships to ensure no duplication + for _, r := range d.Relationships { + relationshipExists[serializeRel(r)] = true + } + + // build relationships for documentDescribes field + for _, id := range e.DocumentDescribes { + r := &Relationship{ + RefA: common.DocElementID{ + ElementRefID: d.SPDXIdentifier, + }, + RefB: id, + Relationship: common.TypeRelationshipDescribe, + } + + if !relationshipExists[serializeRel(r)] { + d.Relationships = append(d.Relationships, r) + relationshipExists[serializeRel(r)] = true + } + } + + // build relationships for package hasFiles field + for _, p := range d.Packages { + for _, f := range p.hasFiles { + r := &Relationship{ + RefA: common.DocElementID{ + ElementRefID: p.PackageSPDXIdentifier, + }, + RefB: f, + Relationship: common.TypeRelationshipContains, + } + if !relationshipExists[serializeRel(r)] { + d.Relationships = append(d.Relationships, r) + relationshipExists[serializeRel(r)] = true + } + } + + p.hasFiles = nil + } + + return nil +} + +var _ json.Unmarshaler = (*Document)(nil) diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/file.go similarity index 98% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go rename to 
vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/file.go index 150e79f0bb..1433394901 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/file.go @@ -2,7 +2,9 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) // File is a File section of an SPDX Document for version 2.2 of the spec. type File struct { diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/other_license.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/other_license.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/other_license.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/other_license.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/package.go similarity index 73% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/package.go index 2d99e0456b..54de537c3f 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/package.go @@ -2,7 +2,12 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "encoding/json" + "strings" + + "github.com/spdx/tools-golang/spdx/v2/common" +) // Package is a Package section of an SPDX Document for version 2.2 of the spec. type Package struct { @@ -43,12 +48,12 @@ type Package struct { // 7.8: FilesAnalyzed // Cardinality: optional, one; default value is "true" if omitted - FilesAnalyzed bool `json:"filesAnalyzed,omitempty"` + FilesAnalyzed bool `json:"filesAnalyzed"` // NOT PART OF SPEC: did FilesAnalyzed tag appear? 
IsFilesAnalyzedTagPresent bool `json:"-"` // 7.9: Package Verification Code - PackageVerificationCode common.PackageVerificationCode `json:"packageVerificationCode"` + PackageVerificationCode common.PackageVerificationCode `json:"packageVerificationCode,omitempty"` // 7.10: Package Checksum: may have keys for SHA1, SHA256, SHA512 and/or MD5 // Cardinality: optional, one or many @@ -101,7 +106,7 @@ type Package struct { // 7.22: Package External Reference Comment // Cardinality: conditional (optional, one) for each External Reference - // contained within PackageExternalReference2_1 struct, if present + // contained within PackageExternalReference struct, if present // 7.23: Package Attribution Text // Cardinality: optional, one or many @@ -111,8 +116,44 @@ type Package struct { Files []*File `json:"files,omitempty"` Annotations []Annotation `json:"annotations,omitempty"` + + // this field is only used when decoding JSON to translate the hasFiles + // property to relationships + hasFiles []common.DocElementID } +func (p *Package) UnmarshalJSON(b []byte) error { + type pkg Package + type extras struct { + HasFiles []common.DocElementID `json:"hasFiles"` + FilesAnalyzed *bool `json:"filesAnalyzed"` + } + + var p2 pkg + if err := json.Unmarshal(b, &p2); err != nil { + return err + } + + var e extras + if err := json.Unmarshal(b, &e); err != nil { + return err + } + + *p = Package(p2) + + p.hasFiles = e.HasFiles + // FilesAnalyzed defaults to true if omitted + if e.FilesAnalyzed == nil { + p.FilesAnalyzed = true + } else { + p.IsFilesAnalyzedTagPresent = true + } + + return nil +} + +var _ json.Unmarshaler = (*Package)(nil) + // PackageExternalReference is an External Reference to additional info // about a Package, as defined in section 7.21 in version 2.2 of the spec. 
type PackageExternalReference struct { @@ -131,3 +172,32 @@ type PackageExternalReference struct { // Cardinality: conditional (optional, one) for each External Reference ExternalRefComment string `json:"comment,omitempty"` } + +var _ json.Unmarshaler = (*PackageExternalReference)(nil) + +func (r *PackageExternalReference) UnmarshalJSON(b []byte) error { + type ref PackageExternalReference + var rr ref + if err := json.Unmarshal(b, &rr); err != nil { + return err + } + + *r = PackageExternalReference(rr) + r.Category = strings.ReplaceAll(r.Category, "_", "-") + + return nil +} + +var _ json.Marshaler = (*PackageExternalReference)(nil) + +// We output as the JSON type enums since in v2.2.0 the JSON schema +// spec only had enums with _ (e.g. PACKAGE_MANAGER) +func (r *PackageExternalReference) MarshalJSON() ([]byte, error) { + type ref PackageExternalReference + var rr ref + + rr = ref(*r) + rr.Category = strings.ReplaceAll(rr.Category, "-", "_") + + return json.Marshal(&rr) +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/relationship.go similarity index 92% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/relationship.go index a93baa714d..47df33378f 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/relationship.go @@ -2,7 +2,9 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) // Relationship is a Relationship section of an SPDX Document for // version 2.2 of the spec. 
diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/review.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/review.go similarity index 100% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/review.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/review.go diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/snippet.go similarity index 96% rename from vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/snippet.go index 61045f1e02..473c5a11cc 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/snippet.go @@ -2,7 +2,9 @@ package v2_2 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) // Snippet is a Snippet section of an SPDX Document for version 2.2 of the spec. type Snippet struct { diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/annotation.go similarity index 88% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/annotation.go index 121e995235..338394cf60 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/annotation.go @@ -2,9 +2,11 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) -// Annotation is an Annotation section of an SPDX Document for version 2.3 of the spec. 
+// Annotation is an Annotation section of an SPDX Document type Annotation struct { // 12.1: Annotator // Cardinality: conditional (mandatory, one) if there is an Annotation diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/creation_info.go similarity index 80% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/creation_info.go index 33b2caf070..84d5bf082e 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/creation_info.go @@ -2,14 +2,15 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) -// CreationInfo is a Document Creation Information section of an -// SPDX Document for version 2.3 of the spec. +// CreationInfo is a Document Creation Information section of an SPDX Document type CreationInfo struct { // 6.7: License List Version // Cardinality: optional, one - LicenseListVersion string `json:"licenseListVersion"` + LicenseListVersion string `json:"licenseListVersion,omitempty"` // 6.8: Creators: may have multiple keys for Person, Organization // and/or Tool diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go new file mode 100644 index 0000000000..279e976ccd --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go @@ -0,0 +1,150 @@ +// Package v2_3 Package contains the struct definition for an SPDX Document +// and its constituent parts. 
+// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +package v2_3 + +import ( + "encoding/json" + "fmt" + + converter "github.com/anchore/go-struct-converter" + + "github.com/spdx/tools-golang/spdx/v2/common" +) + +const Version = "SPDX-2.3" +const DataLicense = "CC0-1.0" + +// ExternalDocumentRef is a reference to an external SPDX document as defined in section 6.6 +type ExternalDocumentRef struct { + // DocumentRefID is the ID string defined in the start of the + // reference. It should _not_ contain the "DocumentRef-" part + // of the mandatory ID string. + DocumentRefID string `json:"externalDocumentId"` + + // URI is the URI defined for the external document + URI string `json:"spdxDocument"` + + // Checksum is the actual hash data + Checksum common.Checksum `json:"checksum"` +} + +// Document is an SPDX Document: +// See https://spdx.github.io/spdx-spec/v2.3/document-creation-information +type Document struct { + // 6.1: SPDX Version; should be in the format "SPDX-" + // Cardinality: mandatory, one + SPDXVersion string `json:"spdxVersion"` + + // 6.2: Data License; should be "CC0-1.0" + // Cardinality: mandatory, one + DataLicense string `json:"dataLicense"` + + // 6.3: SPDX Identifier; should be "DOCUMENT" to represent + // mandatory identifier of SPDXRef-DOCUMENT + // Cardinality: mandatory, one + SPDXIdentifier common.ElementID `json:"SPDXID"` + + // 6.4: Document Name + // Cardinality: mandatory, one + DocumentName string `json:"name"` + + // 6.5: Document Namespace + // Cardinality: mandatory, one + DocumentNamespace string `json:"documentNamespace"` + + // 6.6: External Document References + // Cardinality: optional, one or many + ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` + + // 6.11: Document Comment + // Cardinality: optional, one + DocumentComment string `json:"comment,omitempty"` + + CreationInfo *CreationInfo `json:"creationInfo"` + Packages []*Package `json:"packages,omitempty"` + Files 
[]*File `json:"files,omitempty"` + OtherLicenses []*OtherLicense `json:"hasExtractedLicensingInfos,omitempty"` + Relationships []*Relationship `json:"relationships,omitempty"` + Annotations []*Annotation `json:"annotations,omitempty"` + Snippets []Snippet `json:"snippets,omitempty"` + + // DEPRECATED in version 2.0 of spec + Reviews []*Review `json:"-" yaml:"-"` +} + +func (d *Document) ConvertFrom(_ interface{}) error { + d.SPDXVersion = Version + return nil +} + +var _ converter.ConvertFrom = (*Document)(nil) + +func (d *Document) UnmarshalJSON(b []byte) error { + type doc Document + type extras struct { + DocumentDescribes []common.DocElementID `json:"documentDescribes"` + } + + var d2 doc + if err := json.Unmarshal(b, &d2); err != nil { + return err + } + + var e extras + if err := json.Unmarshal(b, &e); err != nil { + return err + } + + *d = Document(d2) + + relationshipExists := map[string]bool{} + serializeRel := func(r *Relationship) string { + return fmt.Sprintf("%v-%v->%v", common.RenderDocElementID(r.RefA), r.Relationship, common.RenderDocElementID(r.RefB)) + } + + // index current list of relationships to ensure no duplication + for _, r := range d.Relationships { + relationshipExists[serializeRel(r)] = true + } + + // build relationships for documentDescribes field + for _, id := range e.DocumentDescribes { + r := &Relationship{ + RefA: common.DocElementID{ + ElementRefID: d.SPDXIdentifier, + }, + RefB: id, + Relationship: common.TypeRelationshipDescribe, + } + + if !relationshipExists[serializeRel(r)] { + d.Relationships = append(d.Relationships, r) + relationshipExists[serializeRel(r)] = true + } + } + + // build relationships for package hasFiles field + // build relationships for package hasFiles field + for _, p := range d.Packages { + for _, f := range p.hasFiles { + r := &Relationship{ + RefA: common.DocElementID{ + ElementRefID: p.PackageSPDXIdentifier, + }, + RefB: f, + Relationship: common.TypeRelationshipContains, + } + if 
!relationshipExists[serializeRel(r)] { + d.Relationships = append(d.Relationships, r) + relationshipExists[serializeRel(r)] = true + } + } + + p.hasFiles = nil + } + + return nil +} + +var _ json.Unmarshaler = (*Document)(nil) diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/file.go similarity index 94% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/file.go index c472fdb2fc..9f8f28acdb 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/file.go @@ -2,9 +2,11 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) -// File is a File section of an SPDX Document for version 2.3 of the spec. +// File is a File section of an SPDX Document type File struct { // 8.1: File Name // Cardinality: mandatory, one @@ -74,7 +76,7 @@ type File struct { } // ArtifactOfProject is a DEPRECATED collection of data regarding -// a Package, as defined in sections 8.9-8.11 in version 2.3 of the spec. +// a Package, as defined in sections 8.9-8.11. 
// NOTE: the JSON schema does not define the structure of this object: // https://github.com/spdx/spdx-spec/blob/development/v2.3.1/schemas/spdx-schema.json#L480 type ArtifactOfProject struct { diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/other_license.go similarity index 90% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/other_license.go index 363bb41253..55971f42a7 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/other_license.go @@ -2,8 +2,7 @@ package v2_3 -// OtherLicense is an Other License Information section of an -// SPDX Document for version 2.3 of the spec. +// OtherLicense is an Other License Information section of an SPDX Document type OtherLicense struct { // 10.1: License Identifier: "LicenseRef-[idstring]" // Cardinality: conditional (mandatory, one) if license is not diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/package.go similarity index 75% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/package.go index b9d5b9515b..0acadc27be 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/package.go @@ -2,9 +2,14 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "encoding/json" + "strings" -// Package is a Package section of an SPDX Document for version 2.3 of the spec. 
+ "github.com/spdx/tools-golang/spdx/v2/common" +) + +// Package is a Package section of an SPDX Document type Package struct { // NOT PART OF SPEC // flag: does this "package" contain files that were in fact "unpackaged", @@ -43,7 +48,7 @@ type Package struct { // 7.8: FilesAnalyzed // Cardinality: optional, one; default value is "true" if omitted - FilesAnalyzed bool `json:"filesAnalyzed,omitempty"` + FilesAnalyzed bool `json:"filesAnalyzed"` // NOT PART OF SPEC: did FilesAnalyzed tag appear? IsFilesAnalyzedTagPresent bool `json:"-" yaml:"-"` @@ -81,8 +86,8 @@ type Package struct { PackageLicenseComments string `json:"licenseComments,omitempty"` // 7.17: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" - // Cardinality: mandatory, one - PackageCopyrightText string `json:"copyrightText"` + // Cardinality: optional, zero or one + PackageCopyrightText string `json:"copyrightText,omitempty"` // 7.18: Package Summary Description // Cardinality: optional, one @@ -102,7 +107,7 @@ type Package struct { // 7.22: Package External Reference Comment // Cardinality: conditional (optional, one) for each External Reference - // contained within PackageExternalReference2_1 struct, if present + // contained within PackageExternalReference struct, if present // 7.23: Package Attribution Text // Cardinality: optional, one or many @@ -129,10 +134,47 @@ type Package struct { Files []*File `json:"files,omitempty"` Annotations []Annotation `json:"annotations,omitempty"` + + // this field is only used when decoding JSON to translate the hasFiles + // property to relationships + hasFiles []common.DocElementID } +func (p *Package) UnmarshalJSON(b []byte) error { + type pkg Package + type extras struct { + HasFiles []common.DocElementID `json:"hasFiles"` + FilesAnalyzed *bool `json:"filesAnalyzed"` + } + + var p2 pkg + if err := json.Unmarshal(b, &p2); err != nil { + return err + } + + var e extras + if err := json.Unmarshal(b, &e); err != nil { + return err + } + + *p = 
Package(p2) + + p.hasFiles = e.HasFiles + + // FilesAnalyzed defaults to true if omitted + if e.FilesAnalyzed == nil { + p.FilesAnalyzed = true + } else { + p.IsFilesAnalyzedTagPresent = true + } + + return nil +} + +var _ json.Unmarshaler = (*Package)(nil) + // PackageExternalReference is an External Reference to additional info -// about a Package, as defined in section 7.21 in version 2.3 of the spec. +// about a Package, as defined in section 7.21 type PackageExternalReference struct { // category is "SECURITY", "PACKAGE-MANAGER" or "OTHER" Category string `json:"referenceCategory"` @@ -149,3 +191,31 @@ type PackageExternalReference struct { // Cardinality: conditional (optional, one) for each External Reference ExternalRefComment string `json:"comment,omitempty"` } + +var _ json.Unmarshaler = (*PackageExternalReference)(nil) + +func (r *PackageExternalReference) UnmarshalJSON(b []byte) error { + type ref PackageExternalReference + var rr ref + if err := json.Unmarshal(b, &rr); err != nil { + return err + } + + rr.Category = strings.ReplaceAll(rr.Category, "_", "-") + + *r = PackageExternalReference(rr) + return nil +} + +var _ json.Marshaler = (*PackageExternalReference)(nil) + +func (r *PackageExternalReference) MarshalJSON() ([]byte, error) { + type ref PackageExternalReference + var rr ref + + rr = ref(*r) + + rr.Category = strings.ReplaceAll(rr.Category, "_", "-") + + return json.Marshal(&rr) +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/relationship.go similarity index 81% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/relationship.go index af4c07d164..d5cd8d8ba1 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/relationship.go @@ -2,10 +2,11 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" 
+import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) -// Relationship is a Relationship section of an SPDX Document for -// version 2.3 of the spec. +// Relationship is a Relationship section of an SPDX Document type Relationship struct { // 11.1: Relationship diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/review.go similarity index 89% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/review.go index 0463807fbd..cf1a1c71c5 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/review.go @@ -2,7 +2,7 @@ package v2_3 -// Review is a Review section of an SPDX Document for version 2.3 of the spec. +// Review is a Review section of an SPDX Document. // DEPRECATED in version 2.0 of spec; retained here for compatibility. type Review struct { diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/snippet.go similarity index 92% rename from vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go rename to vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/snippet.go index d55a1a968f..9c479d2323 100644 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go +++ b/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/snippet.go @@ -2,9 +2,11 @@ package v2_3 -import "github.com/spdx/tools-golang/spdx/common" +import ( + "github.com/spdx/tools-golang/spdx/v2/common" +) -// Snippet is a Snippet section of an SPDX Document for version 2.3 of the spec. 
+// Snippet is a Snippet section of an SPDX Document type Snippet struct { // 9.1: Snippet SPDX Identifier: "SPDXRef-[idstring]" diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go deleted file mode 100644 index 32fdb8db84..0000000000 --- a/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go +++ /dev/null @@ -1,65 +0,0 @@ -// Package spdx contains the struct definition for an SPDX Document -// and its constituent parts. -// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later -package v2_3 - -import "github.com/spdx/tools-golang/spdx/common" - -// ExternalDocumentRef is a reference to an external SPDX document -// as defined in section 6.6 for version 2.3 of the spec. -type ExternalDocumentRef struct { - // DocumentRefID is the ID string defined in the start of the - // reference. It should _not_ contain the "DocumentRef-" part - // of the mandatory ID string. - DocumentRefID string `json:"externalDocumentId"` - - // URI is the URI defined for the external document - URI string `json:"spdxDocument"` - - // Checksum is the actual hash data - Checksum common.Checksum `json:"checksum"` -} - -// Document is an SPDX Document for version 2.3 of the spec. 
-// See https://spdx.github.io/spdx-spec/v2.3/document-creation-information -type Document struct { - // 6.1: SPDX Version; should be in the format "SPDX-2.3" - // Cardinality: mandatory, one - SPDXVersion string `json:"spdxVersion"` - - // 6.2: Data License; should be "CC0-1.0" - // Cardinality: mandatory, one - DataLicense string `json:"dataLicense"` - - // 6.3: SPDX Identifier; should be "DOCUMENT" to represent - // mandatory identifier of SPDXRef-DOCUMENT - // Cardinality: mandatory, one - SPDXIdentifier common.ElementID `json:"SPDXID"` - - // 6.4: Document Name - // Cardinality: mandatory, one - DocumentName string `json:"name"` - - // 6.5: Document Namespace - // Cardinality: mandatory, one - DocumentNamespace string `json:"documentNamespace"` - - // 6.6: External Document References - // Cardinality: optional, one or many - ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` - - // 6.11: Document Comment - // Cardinality: optional, one - DocumentComment string `json:"comment,omitempty"` - - CreationInfo *CreationInfo `json:"creationInfo"` - Packages []*Package `json:"packages,omitempty"` - Files []*File `json:"files,omitempty"` - OtherLicenses []*OtherLicense `json:"hasExtractedLicensingInfos,omitempty"` - Relationships []*Relationship `json:"relationships,omitempty"` - Annotations []*Annotation `json:"annotations,omitempty"` - Snippets []Snippet `json:"snippets,omitempty"` - - // DEPRECATED in version 2.0 of spec - Reviews []*Review `json:"-" yaml:"-"` -} diff --git a/vendor/modules.txt b/vendor/modules.txt index efb2ec1278..85183f5eab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -87,6 +87,9 @@ github.com/RackSec/srslog # github.com/agext/levenshtein v1.2.3 ## explicit github.com/agext/levenshtein +# github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 +## explicit; go 1.18 +github.com/anchore/go-struct-converter # github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 ## explicit 
github.com/armon/circbuf @@ -363,11 +366,12 @@ github.com/containerd/go-cni # github.com/containerd/go-runc v1.1.0 ## explicit; go 1.18 github.com/containerd/go-runc -# github.com/containerd/nydus-snapshotter v0.3.1 -## explicit; go 1.17 +# github.com/containerd/nydus-snapshotter v0.8.2 +## explicit; go 1.19 github.com/containerd/nydus-snapshotter/pkg/converter github.com/containerd/nydus-snapshotter/pkg/converter/tool github.com/containerd/nydus-snapshotter/pkg/errdefs +github.com/containerd/nydus-snapshotter/pkg/label # github.com/containerd/stargz-snapshotter/estargz v0.14.3 ## explicit; go 1.19 github.com/containerd/stargz-snapshotter/estargz @@ -375,9 +379,6 @@ github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/containerd/ttrpc v1.2.2 ## explicit; go 1.13 github.com/containerd/ttrpc -# github.com/containerd/typeurl v1.0.2 -## explicit; go 1.13 -github.com/containerd/typeurl # github.com/containerd/typeurl/v2 v2.1.1 ## explicit; go 1.13 github.com/containerd/typeurl/v2 @@ -664,8 +665,8 @@ github.com/mitchellh/hashstructure/v2 # github.com/mitchellh/reflectwalk v1.0.2 ## explicit github.com/mitchellh/reflectwalk -# github.com/moby/buildkit v0.11.7-0.20230723230859-616c3f613b54 -## explicit; go 1.18 +# github.com/moby/buildkit v0.12.2 +## explicit; go 1.20 github.com/moby/buildkit/api/services/control github.com/moby/buildkit/api/types github.com/moby/buildkit/cache @@ -691,12 +692,15 @@ github.com/moby/buildkit/control/gateway github.com/moby/buildkit/executor github.com/moby/buildkit/executor/containerdexecutor github.com/moby/buildkit/executor/oci +github.com/moby/buildkit/executor/resources +github.com/moby/buildkit/executor/resources/types github.com/moby/buildkit/executor/runcexecutor github.com/moby/buildkit/exporter github.com/moby/buildkit/exporter/attestation github.com/moby/buildkit/exporter/containerimage github.com/moby/buildkit/exporter/containerimage/exptypes github.com/moby/buildkit/exporter/containerimage/image 
+github.com/moby/buildkit/exporter/exptypes github.com/moby/buildkit/exporter/local github.com/moby/buildkit/exporter/oci github.com/moby/buildkit/exporter/tar @@ -711,8 +715,10 @@ github.com/moby/buildkit/frontend/dockerfile/dockerignore github.com/moby/buildkit/frontend/dockerfile/instructions github.com/moby/buildkit/frontend/dockerfile/parser github.com/moby/buildkit/frontend/dockerfile/shell +github.com/moby/buildkit/frontend/dockerui github.com/moby/buildkit/frontend/gateway github.com/moby/buildkit/frontend/gateway/client +github.com/moby/buildkit/frontend/gateway/container github.com/moby/buildkit/frontend/gateway/forwarder github.com/moby/buildkit/frontend/gateway/grpcclient github.com/moby/buildkit/frontend/gateway/pb @@ -760,8 +766,6 @@ github.com/moby/buildkit/util/appdefaults github.com/moby/buildkit/util/archutil github.com/moby/buildkit/util/attestation github.com/moby/buildkit/util/bklog -github.com/moby/buildkit/util/buildinfo -github.com/moby/buildkit/util/buildinfo/types github.com/moby/buildkit/util/compression github.com/moby/buildkit/util/cond github.com/moby/buildkit/util/contentutil @@ -995,12 +999,16 @@ github.com/shibumi/go-pathspec # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus -# github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f +# github.com/spdx/tools-golang v0.5.1 ## explicit; go 1.13 +github.com/spdx/tools-golang/convert github.com/spdx/tools-golang/json +github.com/spdx/tools-golang/spdx github.com/spdx/tools-golang/spdx/common -github.com/spdx/tools-golang/spdx/v2_2 -github.com/spdx/tools-golang/spdx/v2_3 +github.com/spdx/tools-golang/spdx/v2/common +github.com/spdx/tools-golang/spdx/v2/v2_1 +github.com/spdx/tools-golang/spdx/v2/v2_2 +github.com/spdx/tools-golang/spdx/v2/v2_3 # github.com/spf13/cobra v1.7.0 ## explicit; go 1.15 github.com/spf13/cobra