diff --git a/.github/workflows/buildkit.yml b/.github/workflows/buildkit.yml index 5ad6f8479f..fdd7633cfa 100644 --- a/.github/workflows/buildkit.yml +++ b/.github/workflows/buildkit.yml @@ -69,15 +69,17 @@ jobs: - name: BuildKit ref run: | - # FIXME(thaJeztah) temporarily overriding version to use for tests; remove with the next release of buildkit + # FIXME(tonistiigi) test suite needs patch moby/buildkit#3567 + # echo "BUILDKIT_REPO=moby/buildkit" >> $GITHUB_ENV # echo "BUILDKIT_REF=$(./hack/buildkit-ref)" >> $GITHUB_ENV - echo "BUILDKIT_REF=3a391492c9d0b7428b6dcaa18c5aa3b5951fdacd" >> $GITHUB_ENV + echo "BUILDKIT_REPO=tonistiigi/buildkit" >> $GITHUB_ENV + echo "BUILDKIT_REF=db67180a1a439efb1547ecf5decd4003ec8f621b" >> $GITHUB_ENV working-directory: moby - name: Checkout BuildKit ${{ env.BUILDKIT_REF }} uses: actions/checkout@v3 with: - repository: "moby/buildkit" + repository: ${{ env.BUILDKIT_REPO }} ref: ${{ env.BUILDKIT_REF }} path: buildkit - diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go index f546c8f98f..4fc626f310 100644 --- a/builder/builder-next/controller.go +++ b/builder/builder-next/controller.go @@ -26,6 +26,7 @@ import ( inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline" localremotecache "github.com/moby/buildkit/cache/remotecache/local" "github.com/moby/buildkit/client" + bkconfig "github.com/moby/buildkit/cmd/buildkitd/config" "github.com/moby/buildkit/control" "github.com/moby/buildkit/frontend" dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder" @@ -38,6 +39,7 @@ import ( "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/worker" "github.com/pkg/errors" + "go.etcd.io/bbolt" bolt "go.etcd.io/bbolt" ) @@ -157,6 +159,11 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) { return nil, err } + historyDB, err := bbolt.Open(filepath.Join(opt.Root, "history.db"), 0o600, nil) + if err != nil { + return nil, err + } + gcPolicy, err := getGCPolicy(opt.BuilderConfig, root) if err != nil { return nil, errors.Wrap(err, "could not get builder GC policy") @@ -189,6 +196,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) { Transport: rt, Layers: layers, Platforms: archutil.SupportedPlatforms(true), + LeaseManager: lm, } wc := &worker.Controller{} @@ -203,6 +211,14 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) { "gateway.v0": gateway.NewGatewayFrontend(wc), } + var hconf *bkconfig.HistoryConfig + if opt.BuilderConfig.History != nil { + hconf = &bkconfig.HistoryConfig{ + MaxAge: opt.BuilderConfig.History.MaxAge, + MaxEntries: opt.BuilderConfig.History.MaxEntries, + } + } + return control.NewController(control.Opt{ SessionManager: opt.SessionManager, WorkerController: wc, @@ -215,7 +231,11 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) { ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{ "inline": inlineremotecache.ResolveCacheExporterFunc(), }, - Entitlements: getEntitlements(opt.BuilderConfig), + Entitlements: getEntitlements(opt.BuilderConfig), + LeaseManager: lm, + ContentStore: store, + HistoryDB: historyDB, + HistoryConfig: hconf, }) } diff --git a/builder/builder-next/executor_unix.go b/builder/builder-next/executor_unix.go index d0dac090ab..4a1d93c25f 100644 --- a/builder/builder-next/executor_unix.go +++ b/builder/builder-next/executor_unix.go @@ -4,6 +4,7 @@ package buildkit import ( + "context" "os" "path/filepath" "strconv" @@ -68,7 
+69,7 @@ type bridgeProvider struct { Root string } -func (p *bridgeProvider) New() (network.Namespace, error) { +func (p *bridgeProvider) New(ctx context.Context, hostname string) (network.Namespace, error) { n, err := p.NetworkByName(networkName) if err != nil { return nil, err @@ -82,6 +83,10 @@ func (p *bridgeProvider) New() (network.Namespace, error) { return iface, nil } +func (p *bridgeProvider) Close() error { + return nil +} + type lnInterface struct { ep *libnetwork.Endpoint sbx *libnetwork.Sandbox diff --git a/builder/builder-next/exporter/export.go b/builder/builder-next/exporter/export.go index e138a6f235..b4a522d6f4 100644 --- a/builder/builder-next/exporter/export.go +++ b/builder/builder-next/exporter/export.go @@ -13,7 +13,6 @@ import ( "github.com/docker/docker/reference" "github.com/moby/buildkit/exporter" "github.com/moby/buildkit/exporter/containerimage/exptypes" - "github.com/moby/buildkit/util/compression" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -103,22 +102,18 @@ func (e *imageExporterInstance) Name() string { return "exporting to image" } -func (e *imageExporterInstance) Config() exporter.Config { - return exporter.Config{ - Compression: compression.Config{ - Type: compression.Default, - }, - } +func (e *imageExporterInstance) Config() *exporter.Config { + return exporter.NewConfig() } -func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) { +func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) { if len(inp.Refs) > 1 { - return nil, fmt.Errorf("exporting multiple references to image store is currently unsupported") + return nil, nil, fmt.Errorf("exporting multiple references to image store is currently unsupported") } ref := inp.Ref if ref != nil && len(inp.Refs) == 1 { - return nil, fmt.Errorf("invalid exporter input: Ref and Refs are mutually exclusive") + return nil, nil, fmt.Errorf("invalid exporter input: Ref and Refs are mutually exclusive") } // only one loop @@ -137,14 +132,14 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, case 1: platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey] if !ok { - return nil, fmt.Errorf("cannot export image, missing platforms mapping") + return nil, nil, fmt.Errorf("cannot export image, missing platforms mapping") } var p exptypes.Platforms if err := json.Unmarshal(platformsBytes, &p); err != nil { - return nil, errors.Wrapf(err, "failed to parse platforms passed to exporter") + return nil, nil, errors.Wrapf(err, "failed to parse platforms passed to exporter") } if len(p.Platforms) != len(inp.Refs) { - return nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs)) + return nil, nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs)) } config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.Platforms[0].ID)] if v, ok := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p.Platforms[0].ID)]; ok { @@ -157,16 +152,16 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, layersDone := oneOffProgress(ctx, "exporting layers") if err := ref.Finalize(ctx); err != nil { - return nil, layersDone(err) + return nil, nil, layersDone(err) } if err := ref.Extract(ctx, nil); err != nil { - return nil, err + return nil, nil, err } 
diffIDs, err := e.opt.Differ.EnsureLayer(ctx, ref.ID()) if err != nil { - return nil, layersDone(err) + return nil, nil, layersDone(err) } diffs = make([]digest.Digest, len(diffIDs)) @@ -181,20 +176,20 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, var err error config, err = emptyImageConfig() if err != nil { - return nil, err + return nil, nil, err } } history, err := parseHistoryFromConfig(config) if err != nil { - return nil, err + return nil, nil, err } diffs, history = normalizeLayersAndHistory(diffs, history, ref) config, err = patchImageConfig(config, diffs, history, inp.Metadata[exptypes.ExporterInlineCache], buildInfo) if err != nil { - return nil, err + return nil, nil, err } configDigest := digest.FromBytes(config) @@ -202,7 +197,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, configDone := oneOffProgress(ctx, fmt.Sprintf("writing image %s", configDigest)) id, err := e.opt.ImageStore.Create(config) if err != nil { - return nil, configDone(err) + return nil, nil, configDone(err) } _ = configDone(nil) @@ -210,7 +205,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, for _, targetName := range e.targetNames { tagDone := oneOffProgress(ctx, "naming to "+targetName.String()) if err := e.opt.ReferenceStore.AddTag(targetName, digest.Digest(id), true); err != nil { - return nil, tagDone(err) + return nil, nil, tagDone(err) } _ = tagDone(nil) } @@ -219,5 +214,5 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, return map[string]string{ exptypes.ExporterImageConfigDigestKey: configDigest.String(), exptypes.ExporterImageDigestKey: id.String(), - }, nil + }, nil, nil } diff --git a/builder/builder-next/worker/worker.go b/builder/builder-next/worker/worker.go index af980bfbfc..59934a2112 100644 --- a/builder/builder-next/worker/worker.go +++ b/builder/builder-next/worker/worker.go @@ -9,6 +9,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" "github.com/docker/docker/builder/builder-next/adapters/containerimage" @@ -39,6 +40,7 @@ import ( "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/version" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -46,6 +48,10 @@ import ( "golang.org/x/sync/semaphore" ) +func init() { + version.Version = "v0.11.0-rc3" +} + const labelCreatedAt = "buildkit/createdat" // LayerAccess provides access to a moby layer from a snapshot @@ -63,6 +69,7 @@ type Opt struct { Snapshotter snapshot.Snapshotter ContentStore content.Store CacheManager cache.Manager + LeaseManager leases.Manager ImageSource *containerimage.Source DownloadManager *xfer.LayerDownloadManager V2MetadataService distmetadata.V2MetadataService @@ -157,17 +164,42 @@ func (w *Worker) GCPolicy() []client.PruneInfo { return w.Opt.GCPolicy } +// BuildkitVersion returns BuildKit version +func (w *Worker) BuildkitVersion() client.BuildkitVersion { + return client.BuildkitVersion{ + Package: version.Package, + Version: version.Version + "-moby", + Revision: version.Revision, + } +} + +// Close closes the worker and releases all resources +func (w *Worker) Close() error { + return nil +} + // ContentStore returns 
content store func (w *Worker) ContentStore() content.Store { return w.Opt.ContentStore } +// LeaseManager returns leases.Manager for the worker +func (w *Worker) LeaseManager() leases.Manager { + return w.Opt.LeaseManager +} + // LoadRef loads a reference by ID func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) { var opts []cache.RefOption if hidden { opts = append(opts, cache.NoUpdateLastUsed) } + if id == "" { + // results can have nil refs if they are optimized out to be equal to scratch, + // i.e. Diff(A,A) == scratch + return nil, nil + } + return w.CacheManager().Get(ctx, id, nil, opts...) } diff --git a/daemon/config/builder.go b/daemon/config/builder.go index 08671159b9..52be1b314c 100644 --- a/daemon/config/builder.go +++ b/daemon/config/builder.go @@ -60,6 +60,12 @@ type BuilderGCConfig struct { DefaultKeepStorage string `json:",omitempty"` } +// BuilderHistoryConfig contains history config for a buildkit builder +type BuilderHistoryConfig struct { + MaxAge int64 `json:",omitempty"` + MaxEntries int64 `json:",omitempty"` +} + // BuilderEntitlements contains settings to enable/disable entitlements type BuilderEntitlements struct { NetworkHost *bool `json:"network-host,omitempty"` @@ -68,6 +74,7 @@ type BuilderEntitlements struct { // BuilderConfig contains config for the builder type BuilderConfig struct { - GC BuilderGCConfig `json:",omitempty"` - Entitlements BuilderEntitlements `json:",omitempty"` + GC BuilderGCConfig `json:",omitempty"` + Entitlements BuilderEntitlements `json:",omitempty"` + History *BuilderHistoryConfig `json:",omitempty"` } diff --git a/integration/build/build_session_test.go b/integration/build/build_session_test.go index 2ca31a635d..30a6ca9747 100644 --- a/integration/build/build_session_test.go +++ b/integration/build/build_session_test.go @@ -95,8 +95,8 @@ func testBuildWithSession(t *testing.T, client dclient.APIClient, daemonHost str sess, err := session.NewSession(ctx, "foo1", "foo") assert.Check(t, err) - fsProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{ - {Dir: dir}, + fsProvider := filesync.NewFSSyncProvider(filesync.StaticDirSource{ + "": {Dir: dir}, }) sess.Allow(fsProvider) diff --git a/vendor.mod b/vendor.mod index 914943c522..746ba8e13e 100644 --- a/vendor.mod +++ b/vendor.mod @@ -56,7 +56,7 @@ require ( github.com/klauspost/compress v1.15.12 github.com/miekg/dns v1.1.43 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible - github.com/moby/buildkit v0.10.6 + github.com/moby/buildkit v0.11.2 github.com/moby/ipvs v1.1.0 github.com/moby/locker v1.0.1 github.com/moby/patternmatcher v0.5.0 @@ -81,7 +81,7 @@ require ( github.com/sirupsen/logrus v1.9.0 github.com/spf13/cobra v1.6.1 github.com/spf13/pflag v1.0.5 - github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3 + github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa github.com/tonistiigi/go-archvariant v1.0.0 github.com/vbatts/tar-split v0.11.2 github.com/vishvananda/netlink v1.2.1-beta.2 @@ -111,13 +111,14 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.2 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.16.16 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.1.2 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cilium/ebpf v0.7.0 // indirect github.com/container-storage-interface/spec v1.5.0 // indirect github.com/containerd/console v1.0.3 // indirect github.com/containerd/go-runc v1.0.0 // indirect - 
github.com/containerd/stargz-snapshotter v0.11.3 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.11.3 // indirect + github.com/containerd/nydus-snapshotter v0.3.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect github.com/containerd/ttrpc v1.1.0 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/dustin/go-humanize v1.0.0 // indirect @@ -141,14 +142,21 @@ require ( github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/in-toto/in-toto-golang v0.5.0 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect + github.com/onsi/ginkgo v1.16.4 // indirect + github.com/onsi/gomega v1.20.1 // indirect + github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 // indirect github.com/philhofer/fwd v1.1.2 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect + github.com/shibumi/go-pathspec v1.3.0 // indirect + github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f // indirect github.com/tinylib/msgp v1.1.6 // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect @@ -160,7 +168,10 @@ require ( go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect go.opentelemetry.io/otel v1.4.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 // indirect go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect go.opentelemetry.io/otel/metric v0.27.0 // indirect go.opentelemetry.io/otel/sdk v1.4.1 // indirect diff --git a/vendor.sum b/vendor.sum index bba3abfbc2..77b0674eb3 100644 --- a/vendor.sum +++ b/vendor.sum @@ -1,5 +1,4 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -64,26 +63,21 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/AdaLogics/go-fuzz-headers 
v0.0.0-20221118232415-3345c89a7c72 h1:kq78byqmxX6R9uk4uN3HD2F5tkZJAZMauuLSkNPS8to= github.com/AdaLogics/go-fuzz-headers v0.0.0-20221118232415-3345c89a7c72/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -98,7 +92,6 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= @@ -108,16 +101,12 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim 
v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= -github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY= github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -136,7 +125,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs= @@ -174,7 +162,6 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.16.16 h1:otZvq9r+xjPL7qU/luX2QdBamiN github.com/aws/aws-sdk-go-v2/service/sts v1.16.16/go.mod h1:Y9iBgT1w2vHtYzJEkwD6FqILjDSsvbxcW/+wIYxyse4= github.com/aws/smithy-go v1.13.1 h1:q09BdpUiaqpothcv393ACfWJJHzlzjB5HaNL1XHKmoQ= github.com/aws/smithy-go v1.13.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -194,11 +181,11 @@ github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8 h1:fcONpniVVbh9+duVZYYbJuc+yGGdLRxTqpk7pTTz/qI= github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8/go.mod h1:GrjfimWtH8h8EqJSfbO+sTQYV/fAjL/VN7dMeU8XP2Y= github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= 
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -242,6 +229,7 @@ github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcK github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo= github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= @@ -258,7 +246,6 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= @@ -276,15 +263,12 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= 
-github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= -github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE= github.com/containerd/containerd v1.6.16 h1:0H5xH6ABsN7XTrxIAKxFpBkFCBtrZ/OSORhCpUnHjrc= github.com/containerd/containerd v1.6.16/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -294,7 +278,6 @@ github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cE github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= @@ -306,8 +289,6 @@ github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= -github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= @@ -318,15 +299,14 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4= github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/stargz-snapshotter v0.11.3 h1:D3PoF563XmOBdtfx2G6AkhbHueqwIVPBFn2mrsWLa3w= -github.com/containerd/stargz-snapshotter v0.11.3/go.mod h1:2j2EAUyvrLU4D9unYlTIwGhDKQIk74KJ9E71lJsQCVM= +github.com/containerd/nydus-snapshotter v0.3.1 h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk= +github.com/containerd/nydus-snapshotter v0.3.1/go.mod h1:+8R7NX7vrjlxAgtidnsstwIhpzyTlriYPssTxH++uiM= 
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= -github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M= -github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0= +github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw= +github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= @@ -347,21 +327,17 @@ github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNR github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -389,7 +365,6 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= github.com/davecgh/go-spew 
v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -401,17 +376,13 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.13+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.3-0.20211208011758-87521affb077+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= @@ -447,10 +418,7 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee h1:v6Eju/FhxsACGNipFEPBZZAzGr1F/jlRQr1qiBw2nEE= @@ -458,7 +426,6 @@ github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee/go.mod h1:2H9hjfb github.com/fluent/fluent-logger-golang v1.9.0 h1:zUdY44CHX2oIUc7VTNZc+4m+ORuO/mldQDA7czhWXEg= github.com/fluent/fluent-logger-golang v1.9.0/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -466,7 +433,6 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -485,29 +451,23 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.0/go.mod 
h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= @@ -595,7 +555,6 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -635,8 +594,6 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99 github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers 
v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= @@ -658,7 +615,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hanwen/go-fuse/v2 v2.1.1-0.20220112183258-f57e95bda82d/go.mod h1:B1nGE/6RBFyBRC1RRnf23UpwCdyJ31eukw34oAKukAc= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -667,7 +623,6 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= @@ -682,7 +637,6 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= @@ -714,14 +668,14 @@ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= +github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062 h1:G1+wBT0dwjIrBdLy0MIG0i+E4CQxEnedHXdauJEIH6g= github.com/ishidawataru/sctp 
v0.0.0-20210707070123-9a39160e9062/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= -github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -729,7 +683,6 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -748,7 +701,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -764,7 +716,6 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -772,7 +723,6 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/mattn/go-colorable 
v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -780,7 +730,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= @@ -805,8 +754,8 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/moby/buildkit v0.10.6 h1:DJlEuLIgnu34HQKF4n9Eg6q2YqQVC0eOpMb4p2eRS2w= -github.com/moby/buildkit v0.10.6/go.mod h1:tQuuyTWtOb9D+RE425cwOCUkX0/oZ+5iBZ+uWpWQ9bU= +github.com/moby/buildkit v0.11.2 h1:hNNsYuRssvFnp/qJ8FifStEUzROl5riPAEwk7cRzMjg= +github.com/moby/buildkit v0.11.2/go.mod h1:b5hR8j3BZaOj5+gf6yielP9YLT9mU92zy3zZtdoUTrw= github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ= github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= @@ -815,7 +764,6 @@ github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/pubsub v1.0.0 h1:jkp/imWsmJz2f6LyFsk7EkVeN2HxR/HTTOY8kHrsxfA= github.com/moby/pubsub v1.0.0/go.mod h1:bXSO+3h5MNXXCaEG+6/NlAIk7MMZbySZlnB+cUQhKKc= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/swarmkit/v2 v2.0.0-20230119195359-904c221ac281 h1:E0LdO1cZEXmXrLoojCqEvVCk4cNLWSVotoDbWUmNa8g= github.com/moby/swarmkit/v2 v2.0.0-20230119195359-904c221ac281/go.mod h1:jIgi55SqNJvlQ74bK35NXKWz6JCTexx5h69d0btP2AM= github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= @@ -823,19 +771,16 @@ github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwK github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod 
h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc= github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU= github.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -867,8 +812,6 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -879,8 +822,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= -github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -889,7 +832,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec 
v1.0.3-0.20220303224323-02efb9a75ee1 h1:9iFHD5Kt9hkOfeawBNiEeEaV7bmC4/Z5wJp8E9BptMs= github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -898,7 +840,6 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -916,13 +857,13 @@ github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuh github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4= github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 h1:DiLBVp4DAcZlBVBEtJpNWZpZVq0AEeCY7Hqk8URVs4o= +github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170/go.mod h1:uQd4a7Rh3ZsVg5j0lNyAfyxIeGde9yrlhjF78GzeW0c= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -966,7 +907,6 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= @@ -991,22 +931,22 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rootless-containers/rootlesskit v1.1.0 h1:cRaRIYxY8oce4eE/zeAUZhgKu/4tU1p9YHN4+suwV7M= github.com/rootless-containers/rootlesskit v1.1.0/go.mod h1:H+o9ndNe7tS91WqU0/+vpvc+VaCd7TCIWaJjnV0ujUo= -github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= -github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= +github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= +github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= +github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -1024,6 +964,9 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM= +github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f h1:9B623Cfs+mclYK6dsae7gLSwuIBHvlgmEup87qpqsAQ= +github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f/go.mod h1:VHzvNsKAfAGqs4ZvwRL+7a0dNsL20s7lGui4K9C0xQM= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero 
v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= @@ -1043,7 +986,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1066,14 +1008,13 @@ github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOM github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3 h1:T1pEe+WB3SCPVAfVquvfPfagKZU2Z8c1OP3SuGB+id0= -github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA= +github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa h1:XOFp/3aBXlqmOFAg3r6e0qQjPnK5I970LilqX+Is1W8= +github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa/go.mod h1:AvLEd1LEIl64G2Jpgwo7aVV5lGH0ePcKl0ygGIHNYl8= github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0= github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= @@ -1085,13 +1026,11 @@ github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaW github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod 
h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.2 h1:Cn05BRLm+iRP/DZxyVSsfVyrzgjDbwHwkVt38qvXnNI= github.com/vishvananda/netns v0.0.2/go.mod h1:yitZXdAVI+yPFSb4QUe+VW3vOVl4PZPNcBgbPxAtJxw= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -1116,22 +1055,15 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= -go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU= go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= -go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v2 v2.305.6/go.mod h1:BHha8XJGe8vCIBfWBpbBLVZ4QjOIlfoouvOwydu63E0= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/pkg/v3 v3.5.6 h1:k1GZrGrfMHy5/cg2bxNGsmLTFisatyhDYCFLRuaavWg= go.etcd.io/etcd/pkg/v3 v3.5.6/go.mod h1:qATwUzDb6MLyGWq2nUj+jwXqZJcxkCuabh0P7Cuff3k= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/raft/v3 v3.5.6 h1:tOmx6Ym6rn2GpZOrvTGJZciJHek6RnC3U/zNInzIN50= go.etcd.io/etcd/raft/v3 v3.5.6/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= go.etcd.io/etcd/server/v3 v3.5.6 h1:RXuwaB8AMiV62TqcqIt4O4bG8NWjsxOkDJVT3MZI5Ds= go.etcd.io/etcd/server/v3 v3.5.6/go.mod h1:6/Gfe8XTGXQJgLYQ65oGKMfPivb2EASLUSMSWN9Sroo= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= @@ -1143,55 +1075,40 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod 
h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 h1:Wjp9vsVSIEyvdiaECfqxY9xBqQ7JaSCGtvHgR4doXZk= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0/go.mod h1:vHItvsnJtp7ES++nFLLFBzUWny7fJQSvTlxFcqQGUr4= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 h1:SLme4Porm+UwX0DdHMxlwRt7FzPSE0sys81bet2o0pU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= -go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g= go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 h1:imIM3vRDMyZK1ypQlQlO+brE22I9lRhJsBDXpDWjlz8= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 h1:AxqDiGk8CorEXStMDZF5Hz9vo9Z7ZZ+I5m8JRl/ko40= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1/go.mod h1:c6E4V3/U+miqjs/8l950wggHGL1qzlp0Ypj9xoGrPqo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 h1:8qOago/OqoFclMUUj/184tZyRdDZFpcejSjbk5Jrl6Y= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1:VwYo0Hak6Efuy0TXsZs8o1hnV3dHDPNtDbycG0hI8+M= go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk= go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod 
h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= go.opentelemetry.io/otel/metric v0.27.0 h1:HhJPsGhJoKRSegPQILFbODU56NS/L1UE4fS1sC5kIwQ= go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= -go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk v1.4.1 h1:J7EaW71E0v87qflB4cDolaqq3AcujGrtyIPGQoZOB0Y= go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= -go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ= go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= -go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c= go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1199,7 +1116,6 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -1225,9 +1141,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto 
v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE= golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= @@ -1317,14 +1231,10 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1425,7 +1335,6 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1433,7 +1342,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1452,7 +1360,6 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210313202042-bd2e13477e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1470,10 +1377,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1497,10 +1401,7 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1521,7 +1422,6 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1547,7 +1447,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1686,7 +1585,6 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1772,7 +1670,6 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= @@ -1806,7 +1703,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -1831,7 +1727,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -1850,56 +1745,35 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= -k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= -k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= -k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= -k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= -k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= -k8s.io/cri-api v0.24.0-alpha.3/go.mod 
h1:c/NLI5Zdyup5+oEYqFO2IE32ptofNiZpS1nL2y51gAg= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= resenje.org/singleflight v0.3.0 h1:USJtsAN6HTUA827ksc+2Kcr7QZ4HBq/z/P8ugVbqKFY= resenje.org/singleflight v0.3.0/go.mod h1:lAgQK7VfjG6/pgredbQfmV0RvG/uVhKo6vSuZ0vCWfk= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -1907,12 +1781,9 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= 
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore new file mode 100644 index 0000000000..50d95c548b --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml new file mode 100644 index 0000000000..c79105c2fb --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.13 + - 1.x + - tip +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE new file mode 100644 index 0000000000..89b8179965 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md new file mode 100644 index 0000000000..16abdfc084 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -0,0 +1,32 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. +The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. + +Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. 
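+
+A minimal sketch of the common pattern (here `doRequest` is a placeholder for
+any `func() error` operation you want to retry):
+
+```go
+err := backoff.Retry(func() error {
+	return doRequest() // retried with growing delays until it returns nil, or the policy stops
+}, backoff.NewExponentialBackOff())
+```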
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[travis]: https://travis-ci.org/cenkalti/backoff
+[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 0000000000..3676ee405d
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+	// NextBackOff returns the duration to wait before retrying the operation,
+	// or backoff.Stop to indicate that no more retries should be made.
+	//
+	// Example usage:
+	//
+	// 	duration := backoff.NextBackOff();
+	// 	if (duration == backoff.Stop) {
+	// 		// Do not retry operation.
+	// 	} else {
+	// 		// Sleep for duration and retry operation.
+	// 	}
+	//
+	NextBackOff() time.Duration
+
+	// Reset to initial state.
+	Reset()
+}
+
+// Stop indicates that no more retries should be made for use in NextBackOff().
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
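+//
+// A minimal sketch (op stands in for any func() error):
+//
+//	_ = Retry(op, NewConstantBackOff(500*time.Millisecond)) // wait half a second between attempts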
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go new file mode 100644 index 0000000000..48482330eb --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -0,0 +1,62 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + return b.BackOff.NextBackOff() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go new file mode 100644 index 0000000000..3d3453215b --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -0,0 +1,158 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. 
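+
+A minimal usage sketch (op stands in for any func() error):
+
+	b := NewExponentialBackOff()
+	b.MaxElapsedTime = time.Minute // give up entirely after a minute of retrying
+	err := Retry(op, b)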
+*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff returns Stop. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Stop time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, + Clock: SystemClock, + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop + } + return next +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. 
+ // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go new file mode 100644 index 0000000000..1ce2507ebc --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -0,0 +1,112 @@ +package backoff + +import ( + "errors" + "time" +) + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + var err error + var next time.Duration + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { + return permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { + return cerr + } + + return err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go new file mode 100644 index 0000000000..df9d68bce5 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -0,0 +1,97 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. 
+// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 0000000000..8120d0213c --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go new file mode 100644 index 0000000000..28d58ca37c --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -0,0 +1,38 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. 
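+
+A minimal sketch (op stands in for any func() error):
+
+	err := Retry(op, WithMaxRetries(NewExponentialBackOff(), 5))
+
+runs op at most six times in total: the initial attempt plus five retries.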
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+	return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+	delegate BackOff
+	maxTries uint64
+	numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+	if b.maxTries == 0 {
+		return Stop
+	}
+	if b.maxTries > 0 {
+		if b.maxTries <= b.numTries {
+			return Stop
+		}
+		b.numTries++
+	}
+	return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+	b.numTries = 0
+	b.delegate.Reset()
+}
diff --git a/vendor/github.com/containerd/containerd/contrib/seccomp/kernelversion/kernel_linux.go b/vendor/github.com/containerd/containerd/contrib/seccomp/kernelversion/kernel_linux.go
new file mode 100644
index 0000000000..ab288fbb8a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/contrib/seccomp/kernelversion/kernel_linux.go
@@ -0,0 +1,92 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+   File copied and customized based on
+   https://github.com/moby/moby/tree/v20.10.14/profiles/seccomp/kernel_linux.go
+*/
+
+package kernelversion
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+
+	"golang.org/x/sys/unix"
+)
+
+// KernelVersion holds information about the kernel.
+type KernelVersion struct {
+	Kernel uint64 // Version of the Kernel (i.e., the "4" in "4.1.2-generic")
+	Major  uint64 // Major revision of the Kernel (i.e., the "1" in "4.1.2-generic")
+}
+
+// String implements fmt.Stringer for KernelVersion.
+func (k *KernelVersion) String() string {
+	if k.Kernel > 0 || k.Major > 0 {
+		return fmt.Sprintf("%d.%d", k.Kernel, k.Major)
+	}
+	return ""
+}
+
+var (
+	currentKernelVersion *KernelVersion
+	kernelVersionError   error
+	once                 sync.Once
+)
+
+// getKernelVersion gets the current kernel version.
+func getKernelVersion() (*KernelVersion, error) {
+	once.Do(func() {
+		var uts unix.Utsname
+		if err := unix.Uname(&uts); err != nil {
+			return
+		}
+		// Remove the \x00 from the release for Atoi to parse correctly
+		currentKernelVersion, kernelVersionError = parseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)]))
+	})
+	return currentKernelVersion, kernelVersionError
+}
+
+// parseRelease parses a string and creates a KernelVersion based on it.
+func parseRelease(release string) (*KernelVersion, error) {
+	var version = KernelVersion{}
+
+	// We only need to make sure we get the "kernel" and "major revision". Sometimes we have
+	// 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64.
+	_, err := fmt.Sscanf(release, "%d.%d", &version.Kernel, &version.Major)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse kernel version %q: %w", release, err)
+	}
+	return &version, nil
+}
+
+// GreaterEqualThan checks if the host's kernel version is greater than, or
+// equal to, the given kernel version minVersion. Only "kernel version" and "major revision"
+// can be specified (e.g., "3.12") and will be taken into account, which means
+// that 3.12.25-gentoo and 3.12-1-amd64 are considered equal (kernel: 3, major: 12).
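+//
+// A hypothetical call site, checking the 5.11 boundary that the userxattr
+// fast path elsewhere in this change relies on:
+//
+//	ok, err := GreaterEqualThan(KernelVersion{Kernel: 5, Major: 11})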
+func GreaterEqualThan(minVersion KernelVersion) (bool, error) { + kv, err := getKernelVersion() + if err != nil { + return false, err + } + if kv.Kernel > minVersion.Kernel { + return true, nil + } + if kv.Kernel == minVersion.Kernel && kv.Major >= minVersion.Major { + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/snapshot/overlayutils/check.go b/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go similarity index 82% rename from vendor/github.com/containerd/stargz-snapshotter/snapshot/overlayutils/check.go rename to vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go index e76c0b3a56..17e7547feb 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/snapshot/overlayutils/check.go +++ b/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go @@ -1,3 +1,6 @@ +//go:build linux +// +build linux + /* Copyright The containerd Authors. @@ -14,25 +17,26 @@ limitations under the License. */ -// ===== -// NOTE: This file is ported from https://github.com/containerd/containerd/blob/v1.5.2/snapshots/overlay/overlayutils/check.go -// TODO: import this from containerd package once we drop support to continerd v1.4.x -// ===== - package overlayutils import ( "fmt" - "io/ioutil" "os" "path/filepath" + "syscall" + kernel "github.com/containerd/containerd/contrib/seccomp/kernelversion" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" - userns "github.com/containerd/containerd/sys" + "github.com/containerd/containerd/pkg/userns" "github.com/containerd/continuity/fs" ) +const ( + // see https://man7.org/linux/man-pages/man2/statfs.2.html + tmpfsMagic = 0x01021994 +) + // SupportsMultipleLowerDir checks if the system supports multiple lowerdirs, // which is required for the overlay snapshotter. On 4.x kernels, multiple lowerdirs // are always available (so this check isn't needed), and backported to RHEL and @@ -41,7 +45,7 @@ import ( // // Ported from moby overlay2. func SupportsMultipleLowerDir(d string) error { - td, err := ioutil.TempDir(d, "multiple-lowerdir-check") + td, err := os.MkdirTemp(d, "multiple-lowerdir-check") if err != nil { return err } @@ -90,6 +94,21 @@ func Supported(root string) error { return SupportsMultipleLowerDir(root) } +// IsPathOnTmpfs returns whether the path is on a tmpfs or not. +// +// It uses statfs to check if the fs type is TMPFS_MAGIC (0x01021994) +// see https://man7.org/linux/man-pages/man2/statfs.2.html +func IsPathOnTmpfs(d string) bool { + stat := syscall.Statfs_t{} + err := syscall.Statfs(d, &stat) + if err != nil { + log.L.WithError(err).Warnf("Could not retrieve statfs for %v", d) + return false + } + + return stat.Type == tmpfsMagic +} + // NeedsUserXAttr returns whether overlayfs should be mounted with the "userxattr" mount option. // // The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. @@ -116,10 +135,19 @@ func NeedsUserXAttr(d string) (bool, error) { return false, nil } - // TODO: add fast path for kernel >= 5.11 . + // userxattr not permitted on tmpfs https://man7.org/linux/man-pages/man5/tmpfs.5.html + if IsPathOnTmpfs(d) { + return false, nil + } + + // Fast path on kernels >= 5.11 // - // Keep in mind that distro vendors might be going to backport the patch to older kernels. - // So we can't completely remove the check. 
+ // Keep in mind that distro vendors might be going to backport the patch to older kernels + // so we can't completely remove the "slow path". + fiveDotEleven := kernel.KernelVersion{Kernel: 5, Major: 11} + if ok, err := kernel.GreaterEqualThan(fiveDotEleven); err == nil && ok { + return true, nil + } tdRoot := filepath.Join(d, "userxattr-check") if err := os.RemoveAll(tdRoot); err != nil { @@ -136,7 +164,7 @@ func NeedsUserXAttr(d string) (bool, error) { } }() - td, err := ioutil.TempDir(tdRoot, "") + td, err := os.MkdirTemp(tdRoot, "") if err != nil { return false, err } diff --git a/vendor/github.com/containerd/stargz-snapshotter/LICENSE b/vendor/github.com/containerd/nydus-snapshotter/LICENSE similarity index 100% rename from vendor/github.com/containerd/stargz-snapshotter/LICENSE rename to vendor/github.com/containerd/nydus-snapshotter/LICENSE diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go new file mode 100644 index 0000000000..b7b9f2a2b7 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +const ( + ManifestOSFeatureNydus = "nydus.remoteimage.v1" + MediaTypeNydusBlob = "application/vnd.oci.image.layer.nydus.blob.v1" + BootstrapFileNameInLayer = "image/image.boot" + + ManifestNydusCache = "containerd.io/snapshot/nydus-cache" + + LayerAnnotationFSVersion = "containerd.io/snapshot/nydus-fs-version" + LayerAnnotationNydusBlob = "containerd.io/snapshot/nydus-blob" + LayerAnnotationNydusBlobDigest = "containerd.io/snapshot/nydus-blob-digest" + LayerAnnotationNydusBlobSize = "containerd.io/snapshot/nydus-blob-size" + LayerAnnotationNydusBlobIDs = "containerd.io/snapshot/nydus-blob-ids" + LayerAnnotationNydusBootstrap = "containerd.io/snapshot/nydus-bootstrap" + LayerAnnotationNydusSourceChainID = "containerd.io/snapshot/nydus-source-chainid" + + LayerAnnotationNydusReferenceBlobIDs = "containerd.io/snapshot/nydus-reference-blob-ids" + + LayerAnnotationUncompressed = "containerd.io/uncompressed" +) diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go new file mode 100644 index 0000000000..dc0130aefe --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go @@ -0,0 +1,839 @@ +//go:build !windows +// +build !windows + +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "archive/tar" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/images/converter" + "github.com/containerd/containerd/labels" + "github.com/containerd/fifo" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + + "github.com/containerd/nydus-snapshotter/pkg/converter/tool" + "github.com/containerd/nydus-snapshotter/pkg/errdefs" +) + +const bootstrapNameInTar = "image.boot" +const blobNameInTar = "image.blob" + +const envNydusBuilder = "NYDUS_BUILDER" +const envNydusWorkDir = "NYDUS_WORKDIR" + +const configGCLabelKey = "containerd.io/gc.ref.content.config" + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +func getBuilder(specifiedPath string) string { + if specifiedPath != "" { + return specifiedPath + } + + builderPath := os.Getenv(envNydusBuilder) + if builderPath != "" { + return builderPath + } + + return "nydus-image" +} + +func ensureWorkDir(specifiedBasePath string) (string, error) { + var baseWorkDir string + + if specifiedBasePath != "" { + baseWorkDir = specifiedBasePath + } else { + baseWorkDir = os.Getenv(envNydusWorkDir) + } + if baseWorkDir == "" { + baseWorkDir = os.TempDir() + } + + if err := os.MkdirAll(baseWorkDir, 0750); err != nil { + return "", errors.Wrapf(err, "create base directory %s", baseWorkDir) + } + + workDirPath, err := os.MkdirTemp(baseWorkDir, "nydus-converter-") + if err != nil { + return "", errors.Wrap(err, "create work directory") + } + + return workDirPath, nil +} + +// Unpack a OCI formatted tar stream into a directory. +func unpackOciTar(ctx context.Context, dst string, reader io.Reader) error { + ds, err := compression.DecompressStream(reader) + if err != nil { + return errors.Wrap(err, "unpack stream") + } + defer ds.Close() + + if _, err := archive.Apply( + ctx, + dst, + ds, + archive.WithConvertWhiteout(func(hdr *tar.Header, file string) (bool, error) { + // Keep to extract all whiteout files. + return true, nil + }), + ); err != nil { + return errors.Wrap(err, "apply with convert whiteout") + } + + return nil +} + +// Unpack a Nydus formatted tar stream into a directory. +func unpackNydusTar(ctx context.Context, bootDst, blobDst string, ra content.ReaderAt) error { + boot, err := os.OpenFile(bootDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return errors.Wrapf(err, "write to bootstrap %s", bootDst) + } + defer boot.Close() + + if err = unpackBootstrapFromNydusTar(ctx, ra, boot); err != nil { + return errors.Wrap(err, "unpack bootstrap from nydus") + } + + blob, err := os.OpenFile(blobDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return errors.Wrapf(err, "write to blob %s", blobDst) + } + defer blob.Close() + + if err = unpackBlobFromNydusTar(ctx, ra, blob); err != nil { + return errors.Wrap(err, "unpack blob from nydus") + } + + return nil +} + +// Unpack the bootstrap from nydus formatted tar stream (blob + bootstrap). 
+// The nydus formatted tar stream is a tar-like structure that arranges the +// data as follows: +// +// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` +func unpackBootstrapFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error { + cur := ra.Size() + reader := newSeekReader(ra) + + const headerSize = 512 + + // Seek from tail to head of nydus formatted tar stream to find nydus + // bootstrap data. + for { + if headerSize > cur { + return fmt.Errorf("invalid tar format at pos %d", cur) + } + + // Try to seek to the part of tar header. + var err error + cur, err = reader.Seek(cur-headerSize, io.SeekCurrent) + if err != nil { + return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize) + } + + tr := tar.NewReader(reader) + // Parse tar header. + hdr, err := tr.Next() + if err != nil { + return errors.Wrap(err, "parse tar header") + } + + if hdr.Name == bootstrapNameInTar { + // Try to seek to the part of tar data (bootstrap_data). + if hdr.Size > cur { + return fmt.Errorf("invalid tar format at pos %d", cur) + } + bootstrapOffset := cur - hdr.Size + _, err = reader.Seek(bootstrapOffset, io.SeekStart) + if err != nil { + return errors.Wrap(err, "seek to bootstrap data offset") + } + + // Copy tar data (bootstrap_data) to provided target writer. + if _, err := io.CopyN(target, reader, hdr.Size); err != nil { + return errors.Wrap(err, "copy bootstrap data to reader") + } + + return nil + } + + if cur == hdr.Size { + break + } + } + + return fmt.Errorf("can't find bootstrap in nydus tar") +} + +// Unpack the blob from nydus formatted tar stream (blob + bootstrap). +// The nydus formatted tar stream is a tar-like structure that arranges the +// data as follows: +// +// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` +func unpackBlobFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error { + cur := ra.Size() + reader := newSeekReader(ra) + + const headerSize = 512 + + // Seek from tail to head of nydus formatted tar stream to find nydus + // bootstrap data. + for { + if headerSize > cur { + break + } + + // Try to seek to the part of tar header. + var err error + cur, err = reader.Seek(cur-headerSize, io.SeekStart) + if err != nil { + return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize) + } + + tr := tar.NewReader(reader) + // Parse tar header. + hdr, err := tr.Next() + if err != nil { + return errors.Wrap(err, "parse tar header") + } + + if hdr.Name == bootstrapNameInTar { + if hdr.Size > cur { + return fmt.Errorf("invalid tar format at pos %d", cur) + } + cur, err = reader.Seek(cur-hdr.Size, io.SeekStart) + if err != nil { + return errors.Wrap(err, "seek to bootstrap data offset") + } + } else if hdr.Name == blobNameInTar { + if hdr.Size > cur { + return fmt.Errorf("invalid tar format at pos %d", cur) + } + _, err = reader.Seek(cur-hdr.Size, io.SeekStart) + if err != nil { + return errors.Wrap(err, "seek to blob data offset") + } + if _, err := io.CopyN(target, reader, hdr.Size); err != nil { + return errors.Wrap(err, "copy blob data to reader") + } + return nil + } + } + + return nil +} + +// Pack converts an OCI tar stream to nydus formatted stream with a tar-like +// structure that arranges the data as follows: +// +// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header` +// +// The caller should write OCI tar stream into the returned `io.WriteCloser`, +// then the Pack method will write the nydus formatted stream to `dest` +// provided by the caller. 
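+//
+// A minimal caller-side sketch (hypothetical; ctx, dst and ociTar are assumed
+// to be a context, the destination writer and an OCI tar reader):
+//
+//	wc, err := Pack(ctx, dst, PackOption{})
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := io.Copy(wc, ociTar); err != nil {
+//		return err
+//	}
+//	return wc.Close() // see the note below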
+//
+// Important: the caller must check `io.WriteCloser.Close() == nil` to ensure
+// the conversion workflow is finished.
+func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) {
+	workDir, err := ensureWorkDir(opt.WorkDir)
+	if err != nil {
+		return nil, errors.Wrap(err, "ensure work directory")
+	}
+	defer func() {
+		if err != nil {
+			os.RemoveAll(workDir)
+		}
+	}()
+
+	sourceDir := filepath.Join(workDir, "source")
+	if err := os.MkdirAll(sourceDir, 0755); err != nil {
+		return nil, errors.Wrap(err, "create source directory")
+	}
+
+	pr, pw := io.Pipe()
+
+	unpackDone := make(chan bool, 1)
+	go func() {
+		if err := unpackOciTar(ctx, sourceDir, pr); err != nil {
+			pr.CloseWithError(errors.Wrapf(err, "unpack to %s", sourceDir))
+			close(unpackDone)
+			return
+		}
+		unpackDone <- true
+	}()
+
+	wc := newWriteCloser(pw, func() error {
+		defer func() {
+			os.RemoveAll(workDir)
+		}()
+
+		// The fact that PipeWriter#Close has been called does not mean that the
+		// PipeReader has finished reading all the data, so the unpack may not be
+		// complete yet; wait for it here.
+		<-unpackDone
+
+		blobPath := filepath.Join(workDir, "blob")
+		blobFifo, err := fifo.OpenFifo(ctx, blobPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644)
+		if err != nil {
+			return errors.Wrapf(err, "create fifo file")
+		}
+		defer blobFifo.Close()
+
+		go func() {
+			err := tool.Pack(tool.PackOption{
+				BuilderPath: getBuilder(opt.BuilderPath),
+
+				BlobPath:         blobPath,
+				FsVersion:        opt.FsVersion,
+				SourcePath:       sourceDir,
+				ChunkDictPath:    opt.ChunkDictPath,
+				PrefetchPatterns: opt.PrefetchPatterns,
+				Compressor:       opt.Compressor,
+				Timeout:          opt.Timeout,
+			})
+			if err != nil {
+				pw.CloseWithError(errors.Wrapf(err, "convert blob for %s", sourceDir))
+				blobFifo.Close()
+			}
+		}()
+
+		buffer := bufPool.Get().(*[]byte)
+		defer bufPool.Put(buffer)
+		if _, err := io.CopyBuffer(dest, blobFifo, *buffer); err != nil {
+			return errors.Wrap(err, "pack nydus tar")
+		}
+
+		return nil
+	})
+
+	return wc, nil
+}
+
+// Merge merges multiple nydus bootstraps (one from each layer of the image)
+// into a final bootstrap. Because enabling the `ChunkDictPath` option may
+// deduplicate data across layers, it returns the actual blob digests
+// referenced by the final bootstrap.
+func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) ([]digest.Digest, error) {
+	workDir, err := ensureWorkDir(opt.WorkDir)
+	if err != nil {
+		return nil, errors.Wrap(err, "ensure work directory")
+	}
+	defer os.RemoveAll(workDir)
+
+	eg, ctx := errgroup.WithContext(ctx)
+	sourceBootstrapPaths := []string{}
+	for idx := range layers {
+		sourceBootstrapPaths = append(sourceBootstrapPaths, filepath.Join(workDir, layers[idx].Digest.Hex()))
+		eg.Go(func(idx int) func() error {
+			return func() error {
+				layer := layers[idx]
+
+				// Use the hex hash string of the whole tar blob as the bootstrap name.
+				bootstrap, err := os.Create(filepath.Join(workDir, layer.Digest.Hex()))
+				if err != nil {
+					return errors.Wrap(err, "create source bootstrap")
+				}
+				defer bootstrap.Close()
+
+				if err := unpackBootstrapFromNydusTar(ctx, layer.ReaderAt, bootstrap); err != nil {
+					return errors.Wrap(err, "unpack nydus tar")
+				}
+
+				return nil
+			}
+		}(idx))
+	}
+
+	if err := eg.Wait(); err != nil {
+		return nil, errors.Wrap(err, "unpack all bootstraps")
+	}
+
+	targetBootstrapPath := filepath.Join(workDir, "bootstrap")
+
+	blobDigests, err := tool.Merge(tool.MergeOption{
+		BuilderPath: getBuilder(opt.BuilderPath),
+
+		SourceBootstrapPaths: sourceBootstrapPaths,
+		TargetBootstrapPath:  targetBootstrapPath,
+		ChunkDictPath:        opt.ChunkDictPath,
+		PrefetchPatterns:     opt.PrefetchPatterns,
+		OutputJSONPath:       filepath.Join(workDir, "merge-output.json"),
+		Timeout:              opt.Timeout,
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "merge bootstrap")
+	}
+
+	var rc io.ReadCloser
+
+	if opt.WithTar {
+		rc, err = packToTar(targetBootstrapPath, fmt.Sprintf("image/%s", bootstrapNameInTar), false)
+		if err != nil {
+			return nil, errors.Wrap(err, "pack bootstrap to tar")
+		}
+	} else {
+		rc, err = os.Open(targetBootstrapPath)
+		if err != nil {
+			return nil, errors.Wrap(err, "open target bootstrap")
+		}
+	}
+	defer rc.Close()
+
+	buffer := bufPool.Get().(*[]byte)
+	defer bufPool.Put(buffer)
+	if _, err = io.CopyBuffer(dest, rc, *buffer); err != nil {
+		return nil, errors.Wrap(err, "copy merged bootstrap")
+	}
+
+	return blobDigests, nil
+}
+
+// Unpack converts a nydus blob layer to an OCI formatted tar stream.
+func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt UnpackOption) error {
+	workDir, err := ensureWorkDir(opt.WorkDir)
+	if err != nil {
+		return errors.Wrap(err, "ensure work directory")
+	}
+	defer os.RemoveAll(workDir)
+
+	bootPath, blobPath := filepath.Join(workDir, bootstrapNameInTar), filepath.Join(workDir, blobNameInTar)
+	if err = unpackNydusTar(ctx, bootPath, blobPath, ra); err != nil {
+		return errors.Wrap(err, "unpack nydus tar")
+	}
+
+	tarPath := filepath.Join(workDir, "oci.tar")
+	blobFifo, err := fifo.OpenFifo(ctx, tarPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644)
+	if err != nil {
+		return errors.Wrapf(err, "create fifo file")
+	}
+	defer blobFifo.Close()
+
+	unpackErrChan := make(chan error)
+	go func() {
+		defer close(unpackErrChan)
+		err := tool.Unpack(tool.UnpackOption{
+			BuilderPath:   getBuilder(opt.BuilderPath),
+			BootstrapPath: bootPath,
+			BlobPath:      blobPath,
+			TarPath:       tarPath,
+			Timeout:       opt.Timeout,
+		})
+		if err != nil {
+			blobFifo.Close()
+			unpackErrChan <- err
+		}
+	}()
+
+	buffer := bufPool.Get().(*[]byte)
+	defer bufPool.Put(buffer)
+	if _, err := io.CopyBuffer(dest, blobFifo, *buffer); err != nil {
+		if unpackErr := <-unpackErrChan; unpackErr != nil {
+			return errors.Wrap(unpackErr, "unpack")
+		}
+		return errors.Wrap(err, "copy oci tar")
+	}
+
+	return nil
+}
+
+// IsNydusBlobAndExists returns true when content with the specified digest
+// exists in the content store and is in nydus blob format.
+func IsNydusBlobAndExists(ctx context.Context, cs content.Store, desc ocispec.Descriptor) bool {
+	_, err := cs.Info(ctx, desc.Digest)
+	if err != nil {
+		return false
+	}
+
+	return IsNydusBlob(ctx, desc)
+}
+
+// IsNydusBlob returns true when the specified descriptor is in nydus blob format.
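+// A descriptor qualifies when it carries the LayerAnnotationNydusBlob
+// annotation, which LayerConvertFunc sets on every converted layer.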
+func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool { + if desc.Annotations == nil { + return false + } + + _, hasAnno := desc.Annotations[LayerAnnotationNydusBlob] + return hasAnno +} + +// LayerConvertFunc returns a function which converts an OCI image layer to +// a nydus blob layer, and set the media type to "application/vnd.oci.image.layer.nydus.blob.v1". +func LayerConvertFunc(opt PackOption) converter.ConvertFunc { + return func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { + if !images.IsLayerType(desc.MediaType) { + return nil, nil + } + + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, errors.Wrap(err, "get source blob reader") + } + defer ra.Close() + rdr := io.NewSectionReader(ra, 0, ra.Size()) + + ref := fmt.Sprintf("convert-nydus-from-%s", desc.Digest) + dst, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return nil, errors.Wrap(err, "open blob writer") + } + defer dst.Close() + + tr, err := compression.DecompressStream(rdr) + if err != nil { + return nil, errors.Wrap(err, "decompress blob stream") + } + + digester := digest.SHA256.Digester() + pr, pw := io.Pipe() + tw, err := Pack(ctx, io.MultiWriter(pw, digester.Hash()), opt) + if err != nil { + return nil, errors.Wrap(err, "pack tar to nydus") + } + + go func() { + defer pw.Close() + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(tw, tr, *buffer); err != nil { + pw.CloseWithError(err) + return + } + if err := tr.Close(); err != nil { + pw.CloseWithError(err) + return + } + if err := tw.Close(); err != nil { + pw.CloseWithError(err) + return + } + }() + + if err := content.Copy(ctx, dst, pr, 0, ""); err != nil { + return nil, errors.Wrap(err, "copy nydus blob to content store") + } + + blobDigest := digester.Digest() + info, err := cs.Info(ctx, blobDigest) + if err != nil { + return nil, errors.Wrapf(err, "get blob info %s", blobDigest) + } + if info.Labels == nil { + info.Labels = map[string]string{} + } + // Write a diff id label of layer in content store for simplifying + // diff id calculation to speed up the conversion. + // See: https://github.com/containerd/containerd/blob/e4fefea5544d259177abb85b64e428702ac49c97/images/diffid.go#L49 + info.Labels[labels.LabelUncompressed] = blobDigest.String() + _, err = cs.Update(ctx, info) + if err != nil { + return nil, errors.Wrap(err, "update layer label") + } + + newDesc := ocispec.Descriptor{ + Digest: blobDigest, + Size: info.Size, + MediaType: MediaTypeNydusBlob, + Annotations: map[string]string{ + // Use `containerd.io/uncompressed` to generate DiffID of + // layer defined in OCI spec. + LayerAnnotationUncompressed: blobDigest.String(), + LayerAnnotationNydusBlob: "true", + }, + } + + if opt.Backend != nil { + blobRa, err := cs.ReaderAt(ctx, newDesc) + if err != nil { + return nil, errors.Wrap(err, "get nydus blob reader") + } + defer blobRa.Close() + + if err := opt.Backend.Push(ctx, blobRa, blobDigest); err != nil { + return nil, errors.Wrap(err, "push to storage backend") + } + } + + return &newDesc, nil + } +} + +// ConvertHookFunc returns a function which will be used as a callback +// called for each blob after conversion is done. The function only hooks +// the index conversion and the manifest conversion. 
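+//
+// A hypothetical wiring sketch with containerd's image converter (this assumes
+// a containerd release that provides converter.IndexConvertFuncWithHook and
+// converter.ConvertHooks; packOpt and mergeOpt are assumed option values):
+//
+//	convertFunc := converter.IndexConvertFuncWithHook(
+//		LayerConvertFunc(packOpt),
+//		true, // docker2oci
+//		platforms.DefaultStrict(),
+//		converter.ConvertHooks{PostConvertHook: ConvertHookFunc(mergeOpt)},
+//	)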
+func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc {
+	return func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) {
+		switch {
+		case images.IsIndexType(newDesc.MediaType):
+			return convertIndex(ctx, cs, orgDesc, newDesc)
+		case images.IsManifestType(newDesc.MediaType):
+			return convertManifest(ctx, cs, newDesc, opt)
+		default:
+			return newDesc, nil
+		}
+	}
+}
+
+// convertIndex modifies the original index by appending "nydus.remoteimage.v1"
+// to the Platform.OSFeatures of each modified manifest descriptor.
+func convertIndex(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) {
+	var orgIndex ocispec.Index
+	if _, err := readJSON(ctx, cs, &orgIndex, orgDesc); err != nil {
+		return nil, errors.Wrap(err, "read target image index json")
+	}
+	// isManifestModified is a function to check whether the manifest is modified.
+	isManifestModified := func(manifest ocispec.Descriptor) bool {
+		for _, oldManifest := range orgIndex.Manifests {
+			if manifest.Digest == oldManifest.Digest {
+				return false
+			}
+		}
+		return true
+	}
+
+	var index ocispec.Index
+	indexLabels, err := readJSON(ctx, cs, &index, *newDesc)
+	if err != nil {
+		return nil, errors.Wrap(err, "read index json")
+	}
+	for i, manifest := range index.Manifests {
+		if !isManifestModified(manifest) {
+			// Skip the manifest which is not modified.
+			continue
+		}
+		manifest.Platform.OSFeatures = append(manifest.Platform.OSFeatures, ManifestOSFeatureNydus)
+		index.Manifests[i] = manifest
+	}
+	// Update image index in content store.
+	newIndexDesc, err := writeJSON(ctx, cs, index, *newDesc, indexLabels)
+	if err != nil {
+		return nil, errors.Wrap(err, "write index json")
+	}
+	return newIndexDesc, nil
+}
+
+// convertManifest merges all the nydus blob layers into a
+// nydus bootstrap layer, updates the image config,
+// and modifies the image manifest.
+func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) {
+	var manifest ocispec.Manifest
+	manifestDesc := *newDesc
+	manifestLabels, err := readJSON(ctx, cs, &manifest, manifestDesc)
+	if err != nil {
+		return nil, errors.Wrap(err, "read manifest json")
+	}
+
+	// Append bootstrap layer to manifest.
+	bootstrapDesc, blobDescs, err := MergeLayers(ctx, cs, manifest.Layers, MergeOption{
+		BuilderPath:   opt.BuilderPath,
+		WorkDir:       opt.WorkDir,
+		ChunkDictPath: opt.ChunkDictPath,
+		FsVersion:     opt.FsVersion,
+		WithTar:       true,
+	})
+	if err != nil {
+		return nil, errors.Wrap(err, "merge nydus layers")
+	}
+	if opt.Backend != nil {
+		// Only append nydus bootstrap layer into manifest, and do not put nydus
+		// blob layer into manifest if blob storage backend is specified.
+		manifest.Layers = []ocispec.Descriptor{*bootstrapDesc}
+	} else {
+		for idx, blobDesc := range blobDescs {
+			blobGCLabelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", idx)
+			manifestLabels[blobGCLabelKey] = blobDesc.Digest.String()
+		}
+		// When a chunk dict is used, the blobs referenced by the final bootstrap
+		// come from different sources: some from the original layers and some
+		// from the chunk dict bootstrap, so we need to rewrite the manifest's layers here.
+ manifest.Layers = append(blobDescs, *bootstrapDesc) + } + + // Update the gc label of bootstrap layer + bootstrapGCLabelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", len(manifest.Layers)-1) + manifestLabels[bootstrapGCLabelKey] = bootstrapDesc.Digest.String() + + // Rewrite diff ids and remove useless annotation. + var config ocispec.Image + configLabels, err := readJSON(ctx, cs, &config, manifest.Config) + if err != nil { + return nil, errors.Wrap(err, "read image config") + } + if opt.Backend != nil { + config.RootFS.DiffIDs = []digest.Digest{digest.Digest(bootstrapDesc.Annotations[LayerAnnotationUncompressed])} + } else { + config.RootFS.DiffIDs = make([]digest.Digest, 0, len(manifest.Layers)) + for i, layer := range manifest.Layers { + config.RootFS.DiffIDs = append(config.RootFS.DiffIDs, digest.Digest(layer.Annotations[LayerAnnotationUncompressed])) + // Remove useless annotation. + delete(manifest.Layers[i].Annotations, LayerAnnotationUncompressed) + } + } + // Update image config in content store. + newConfigDesc, err := writeJSON(ctx, cs, config, manifest.Config, configLabels) + if err != nil { + return nil, errors.Wrap(err, "write image config") + } + manifest.Config = *newConfigDesc + // Update the config gc label + manifestLabels[configGCLabelKey] = newConfigDesc.Digest.String() + + // Update image manifest in content store. + newManifestDesc, err := writeJSON(ctx, cs, manifest, manifestDesc, manifestLabels) + if err != nil { + return nil, errors.Wrap(err, "write manifest") + } + + return newManifestDesc, nil +} + +// MergeLayers merges a list of nydus blob layer into a nydus bootstrap layer. +// The media type of the nydus bootstrap layer is "application/vnd.oci.image.layer.v1.tar+gzip". +func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, []ocispec.Descriptor, error) { + // Extracts nydus bootstrap from nydus format for each layer. + layers := []Layer{} + + var chainID digest.Digest + for _, blobDesc := range descs { + ra, err := cs.ReaderAt(ctx, blobDesc) + if err != nil { + return nil, nil, errors.Wrapf(err, "get reader for blob %q", blobDesc.Digest) + } + defer ra.Close() + layers = append(layers, Layer{ + Digest: blobDesc.Digest, + ReaderAt: ra, + }) + if chainID == "" { + chainID = identity.ChainID([]digest.Digest{blobDesc.Digest}) + } else { + chainID = identity.ChainID([]digest.Digest{chainID, blobDesc.Digest}) + } + } + + // Merge all nydus bootstraps into a final nydus bootstrap. + pr, pw := io.Pipe() + blobDigestChan := make(chan []digest.Digest, 1) + go func() { + defer pw.Close() + blobDigests, err := Merge(ctx, layers, pw, opt) + if err != nil { + pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) + } + blobDigestChan <- blobDigests + }() + + // Compress final nydus bootstrap to tar.gz and write into content store. 
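+	// The bootstrap bytes are tee'd into a SHA256 digester ahead of gzip, so the
+	// uncompressed digest (stored below as the layer's uncompressed annotation,
+	// i.e. its DiffID) is computed in the same pass as the compression.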
+ cw, err := content.OpenWriter(ctx, cs, content.WithRef("nydus-merge-"+chainID.String())) + if err != nil { + return nil, nil, errors.Wrap(err, "open content store writer") + } + defer cw.Close() + + gw := gzip.NewWriter(cw) + uncompressedDgst := digest.SHA256.Digester() + compressed := io.MultiWriter(gw, uncompressedDgst.Hash()) + buffer := bufPool.Get().(*[]byte) + defer bufPool.Put(buffer) + if _, err := io.CopyBuffer(compressed, pr, *buffer); err != nil { + return nil, nil, errors.Wrapf(err, "copy bootstrap targz into content store") + } + if err := gw.Close(); err != nil { + return nil, nil, errors.Wrap(err, "close gzip writer") + } + + compressedDgst := cw.Digest() + if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{ + LayerAnnotationUncompressed: uncompressedDgst.Digest().String(), + })); err != nil { + if !errdefs.IsAlreadyExists(err) { + return nil, nil, errors.Wrap(err, "commit to content store") + } + } + if err := cw.Close(); err != nil { + return nil, nil, errors.Wrap(err, "close content store writer") + } + + bootstrapInfo, err := cs.Info(ctx, compressedDgst) + if err != nil { + return nil, nil, errors.Wrap(err, "get info from content store") + } + + blobDigests := <-blobDigestChan + blobDescs := []ocispec.Descriptor{} + blobIDs := []string{} + for _, blobDigest := range blobDigests { + blobInfo, err := cs.Info(ctx, blobDigest) + if err != nil { + return nil, nil, errors.Wrap(err, "get info from content store") + } + blobDesc := ocispec.Descriptor{ + Digest: blobDigest, + Size: blobInfo.Size, + MediaType: MediaTypeNydusBlob, + Annotations: map[string]string{ + LayerAnnotationUncompressed: blobDigest.String(), + LayerAnnotationNydusBlob: "true", + }, + } + blobDescs = append(blobDescs, blobDesc) + blobIDs = append(blobIDs, blobDigest.Hex()) + } + + blobIDsBytes, err := json.Marshal(blobIDs) + if err != nil { + return nil, nil, errors.Wrap(err, "marshal blob ids") + } + + if opt.FsVersion == "" { + opt.FsVersion = "5" + } + + bootstrapDesc := ocispec.Descriptor{ + Digest: compressedDgst, + Size: bootstrapInfo.Size, + MediaType: ocispec.MediaTypeImageLayerGzip, + Annotations: map[string]string{ + LayerAnnotationUncompressed: uncompressedDgst.Digest().String(), + LayerAnnotationFSVersion: opt.FsVersion, + // Use this annotation to identify nydus bootstrap layer. + LayerAnnotationNydusBootstrap: "true", + // Track all blob digests for nydus snapshotter. + LayerAnnotationNydusBlobIDs: string(blobIDsBytes), + }, + } + + return &bootstrapDesc, blobDescs, nil +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go new file mode 100644 index 0000000000..12cb53ed53 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go @@ -0,0 +1,51 @@ +//go:build windows +// +build windows + +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +package converter + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images/converter" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) { + panic("not implemented") +} + +func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) error { + panic("not implemented") +} + +func Unpack(ctx context.Context, ia content.ReaderAt, dest io.Writer, opt UnpackOption) error { + panic("not implemented") +} + +func IsNydusBlobAndExists(ctx context.Context, cs content.Store, desc ocispec.Descriptor) bool { + panic("not implemented") +} + +func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool { + panic("not implemented") +} + +func LayerConvertFunc(opt PackOption) converter.ConvertFunc { + panic("not implemented") +} + +func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc { + panic("not implemented") +} + +func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) { + panic("not implemented") +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go new file mode 100644 index 0000000000..55e98cc097 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2022. Nydus Developers. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package tool + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os/exec" + "strings" + "time" + + "github.com/containerd/nydus-snapshotter/pkg/errdefs" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var logger = logrus.WithField("module", "builder") + +type PackOption struct { + BuilderPath string + + BootstrapPath string + BlobPath string + FsVersion string + SourcePath string + ChunkDictPath string + PrefetchPatterns string + Compressor string + Timeout *time.Duration +} + +type MergeOption struct { + BuilderPath string + + SourceBootstrapPaths []string + TargetBootstrapPath string + ChunkDictPath string + PrefetchPatterns string + OutputJSONPath string + Timeout *time.Duration +} + +type UnpackOption struct { + BuilderPath string + BootstrapPath string + BlobPath string + TarPath string + Timeout *time.Duration +} + +type outputJSON struct { + Blobs []string +} + +func Pack(option PackOption) error { + if option.FsVersion == "" { + option.FsVersion = "5" + } + + args := []string{ + "create", + "--log-level", + "warn", + "--prefetch-policy", + "fs", + "--blob", + option.BlobPath, + "--source-type", + "directory", + "--whiteout-spec", + "none", + "--fs-version", + option.FsVersion, + "--inline-bootstrap", + } + if option.ChunkDictPath != "" { + args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath)) + } + if option.PrefetchPatterns == "" { + option.PrefetchPatterns = "/" + } + if option.Compressor != "" { + args = append(args, "--compressor", option.Compressor) + } + args = append(args, option.SourcePath) + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, 
strings.Join(args[:], " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) + cmd.Stdout = logger.Writer() + cmd.Stderr = logger.Writer() + cmd.Stdin = strings.NewReader(option.PrefetchPatterns) + + if err := cmd.Run(); err != nil { + if errdefs.IsSignalKilled(err) && option.Timeout != nil { + logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) + } else { + logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) + } + return err + } + + return nil +} + +func Merge(option MergeOption) ([]digest.Digest, error) { + args := []string{ + "merge", + "--log-level", + "warn", + "--prefetch-policy", + "fs", + "--output-json", + option.OutputJSONPath, + "--bootstrap", + option.TargetBootstrapPath, + } + if option.ChunkDictPath != "" { + args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath)) + } + if option.PrefetchPatterns == "" { + option.PrefetchPatterns = "/" + } + args = append(args, option.SourceBootstrapPaths...) + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) + cmd.Stdout = logger.Writer() + cmd.Stderr = logger.Writer() + cmd.Stdin = strings.NewReader(option.PrefetchPatterns) + + if err := cmd.Run(); err != nil { + if errdefs.IsSignalKilled(err) && option.Timeout != nil { + logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout) + } else { + logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args) + } + return nil, errors.Wrap(err, "run merge command") + } + + outputBytes, err := ioutil.ReadFile(option.OutputJSONPath) + if err != nil { + return nil, errors.Wrapf(err, "read file %s", option.OutputJSONPath) + } + var output outputJSON + err = json.Unmarshal(outputBytes, &output) + if err != nil { + return nil, errors.Wrapf(err, "unmarshal output json file %s", option.OutputJSONPath) + } + + blobDigests := []digest.Digest{} + for _, blobID := range output.Blobs { + blobDigests = append(blobDigests, digest.NewDigestFromHex(string(digest.SHA256), blobID)) + } + + return blobDigests, nil +} + +func Unpack(option UnpackOption) error { + args := []string{ + "unpack", + "--log-level", + "warn", + "--bootstrap", + option.BootstrapPath, + "--output", + option.TarPath, + } + if option.BlobPath != "" { + args = append(args, "--blob", option.BlobPath) + } + + ctx := context.Background() + var cancel context.CancelFunc + if option.Timeout != nil { + ctx, cancel = context.WithTimeout(ctx, *option.Timeout) + defer cancel() + } + + logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " ")) + + cmd := exec.CommandContext(ctx, option.BuilderPath, args...) 
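+	// Mirror the builder's stdout/stderr into the module logger so that
+	// nydus-image diagnostics end up in the daemon log.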
+	cmd.Stdout = logger.Writer()
+	cmd.Stderr = logger.Writer()
+
+	if err := cmd.Run(); err != nil {
+		if errdefs.IsSignalKilled(err) && option.Timeout != nil {
+			logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout)
+		} else {
+			logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args)
+		}
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go
new file mode 100644
index 0000000000..9d0590a0c9
--- /dev/null
+++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2022. Nydus Developers. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package converter
+
+import (
+	"context"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/opencontainers/go-digest"
+)
+
+type Layer struct {
+	// Digest represents the hash of whole tar blob.
+	Digest digest.Digest
+	// ReaderAt holds the reader of whole tar blob.
+	ReaderAt content.ReaderAt
+}
+
+// Backend uploads blobs generated by the nydus-image builder to a backend storage such as:
+// - oss: An object storage backend, which uses its SDK to upload blob files.
+type Backend interface {
+	// Push pushes specified blob file to remote storage backend.
+	Push(ctx context.Context, ra content.ReaderAt, blobDigest digest.Digest) error
+	// Check checks whether a blob exists in remote storage backend,
+	// blob exists -> return (blobPath, nil)
+	// blob does not exist -> return ("", err)
+	Check(blobDigest digest.Digest) (string, error)
+	// Type returns backend type name.
+	Type() string
+}
+
+type PackOption struct {
+	// WorkDir is used as the work directory during layer pack.
+	WorkDir string
+	// BuilderPath holds the path of `nydus-image` binary tool.
+	BuilderPath string
+	// FsVersion specifies nydus RAFS format version, possible
+	// values: `5`, `6` (EROFS-compatible), default is `5`.
+	FsVersion string
+	// ChunkDictPath holds the bootstrap path of chunk dict image.
+	ChunkDictPath string
+	// PrefetchPatterns holds the list of file path patterns to prefetch.
+	PrefetchPatterns string
+	// Compressor specifies nydus blob compression algorithm.
+	Compressor string
+	// Backend uploads blobs generated by nydus-image builder to a backend storage.
+	Backend Backend
+	// Timeout cancels execution once it exceeds the specified time.
+	Timeout *time.Duration
+}
+
+type MergeOption struct {
+	// WorkDir is used as the work directory during layer merge.
+	WorkDir string
+	// BuilderPath holds the path of `nydus-image` binary tool.
+	BuilderPath string
+	// FsVersion specifies nydus RAFS format version, possible
+	// values: `5`, `6` (EROFS-compatible), default is `5`.
+	FsVersion string
+	// ChunkDictPath holds the bootstrap path of chunk dict image.
+	ChunkDictPath string
+	// PrefetchPatterns holds the list of file path patterns to prefetch.
+	PrefetchPatterns string
+	// WithTar puts the bootstrap into a tar stream (no gzip).
+	WithTar bool
+	// Backend uploads blobs generated by nydus-image builder to a backend storage.
+	Backend Backend
+	// Timeout cancels execution once it exceeds the specified time.
+	Timeout *time.Duration
+}
+
+type UnpackOption struct {
+	// WorkDir is used as the work directory during layer unpack.
+	WorkDir string
+	// BuilderPath holds the path of `nydus-image` binary tool.
+	BuilderPath string
+	// Timeout cancels execution once it exceeds the specified time.
+	Timeout *time.Duration
+}
diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go
new file mode 100644
index 0000000000..849d870b34
--- /dev/null
+++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2022. Nydus Developers. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package converter
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/containerd/containerd/content"
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type writeCloser struct {
+	closed bool
+	io.WriteCloser
+	action func() error
+}
+
+func (c *writeCloser) Close() error {
+	if c.closed {
+		return nil
+	}
+
+	if err := c.WriteCloser.Close(); err != nil {
+		return err
+	}
+	c.closed = true
+
+	if err := c.action(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func newWriteCloser(wc io.WriteCloser, action func() error) *writeCloser {
+	return &writeCloser{
+		WriteCloser: wc,
+		action:      action,
+	}
+}
+
+type seekReader struct {
+	io.ReaderAt
+	pos int64
+}
+
+func (ra *seekReader) Read(p []byte) (int, error) {
+	n, err := ra.ReaderAt.ReadAt(p, ra.pos)
+	ra.pos += int64(len(p))
+	return n, err
+}
+
+func (ra *seekReader) Seek(offset int64, whence int) (int64, error) {
+	if whence == io.SeekCurrent {
+		ra.pos += offset
+	} else if whence == io.SeekStart {
+		ra.pos = offset
+	} else {
+		return 0, fmt.Errorf("unsupported whence %d", whence)
+	}
+	return ra.pos, nil
+}
+
+func newSeekReader(ra io.ReaderAt) *seekReader {
+	return &seekReader{
+		ReaderAt: ra,
+		pos:      0,
+	}
+}
+
+// packToTar makes a .tar(.gz) stream of the file named `name` and returns a reader.
+func packToTar(src string, name string, compress bool) (io.ReadCloser, error) {
+	fi, err := os.Stat(src)
+	if err != nil {
+		return nil, err
+	}
+
+	dirHdr := &tar.Header{
+		Name:     filepath.Dir(name),
+		Mode:     0755,
+		Typeflag: tar.TypeDir,
+	}
+
+	hdr := &tar.Header{
+		Name: name,
+		Mode: 0444,
+		Size: fi.Size(),
+	}
+
+	reader, writer := io.Pipe()
+
+	go func() {
+		// Prepare targz writer
+		var tw *tar.Writer
+		var gw *gzip.Writer
+		var err error
+		var file *os.File
+
+		if compress {
+			gw = gzip.NewWriter(writer)
+			tw = tar.NewWriter(gw)
+		} else {
+			tw = tar.NewWriter(writer)
+		}
+
+		defer func() {
+			err1 := tw.Close()
+			var err2 error
+			if gw != nil {
+				err2 = gw.Close()
+			}
+
+			var finalErr error
+
+			// Return the first error encountered to the other end and ignore others.
+			if err != nil {
+				finalErr = err
+			} else if err1 != nil {
+				finalErr = err1
+			} else if err2 != nil {
+				finalErr = err2
+			}
+
+			writer.CloseWithError(finalErr)
+		}()
+
+		file, err = os.Open(src)
+		if err != nil {
+			return
+		}
+		defer file.Close()
+
+		// Write targz stream
+		if err = tw.WriteHeader(dirHdr); err != nil {
+			return
+		}
+
+		if err = tw.WriteHeader(hdr); err != nil {
+			return
+		}
+
+		if _, err = io.Copy(tw, file); err != nil {
+			return
+		}
+	}()
+
+	return reader, nil
+}
+
+// Copied from containerd/containerd project, copyright The containerd Authors.
+// https://github.com/containerd/containerd/blob/4902059cb554f4f06a8d06a12134c17117809f4e/images/converter/default.go#L385
+func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec.Descriptor) (map[string]string, error) {
+	info, err := cs.Info(ctx, desc.Digest)
+	if err != nil {
+		return nil, err
+	}
+	labels := info.Labels
+	b, err := content.ReadBlob(ctx, cs, desc)
+	if err != nil {
+		return nil, err
+	}
+	if err := json.Unmarshal(b, x); err != nil {
+		return nil, err
+	}
+	return labels, nil
+}
+
+// Copied from containerd/containerd project, copyright The containerd Authors.
+// https://github.com/containerd/containerd/blob/4902059cb554f4f06a8d06a12134c17117809f4e/images/converter/default.go#L401
+func writeJSON(ctx context.Context, cs content.Store, x interface{}, oldDesc ocispec.Descriptor, labels map[string]string) (*ocispec.Descriptor, error) {
+	b, err := json.Marshal(x)
+	if err != nil {
+		return nil, err
+	}
+	dgst := digest.SHA256.FromBytes(b)
+	ref := fmt.Sprintf("converter-write-json-%s", dgst.String())
+	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
+	if err != nil {
+		return nil, err
+	}
+	if err := content.Copy(ctx, w, bytes.NewReader(b), int64(len(b)), dgst, content.WithLabels(labels)); err != nil {
+		return nil, err
+	}
+	if err := w.Close(); err != nil {
+		return nil, err
+	}
+	newDesc := oldDesc
+	newDesc.Size = int64(len(b))
+	newDesc.Digest = dgst
+	return &newDesc, nil
+}
diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go
new file mode 100644
index 0000000000..3bdf74cb9d
--- /dev/null
+++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020. Ant Group. All rights reserved.
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package errdefs
+
+import (
+	stderrors "errors"
+	"net"
+	"strings"
+	"syscall"
+
+	"github.com/pkg/errors"
+)
+
+const signalKilled = "signal: killed"
+
+var (
+	ErrAlreadyExists = errors.New("already exists")
+	ErrNotFound      = errors.New("not found")
+)
+
+// IsAlreadyExists returns true if the error indicates that the object already exists.
+func IsAlreadyExists(err error) bool {
+	return errors.Is(err, ErrAlreadyExists)
+}
+
+// IsNotFound returns true if the error is due to a missing object.
+func IsNotFound(err error) bool {
+	return errors.Is(err, ErrNotFound)
+}
+
+// IsSignalKilled returns true if the process was killed by a signal.
+func IsSignalKilled(err error) bool {
+	return strings.Contains(err.Error(), signalKilled)
+}
+
+// IsConnectionClosed returns true if the error is due to a closed connection;
+// this is used when the snapshotter is shut down by SIGTERM.
+func IsConnectionClosed(err error) bool {
+	switch err := err.(type) {
+	case *net.OpError:
+		return err.Err.Error() == "use of closed network connection"
+	default:
+		return false
+	}
+}
+
+func IsErofsMounted(err error) bool {
+	return stderrors.Is(err, syscall.EBUSY)
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/NOTICE.md b/vendor/github.com/containerd/stargz-snapshotter/NOTICE.md
deleted file mode 100644
index c907e4216c..0000000000
--- a/vendor/github.com/containerd/stargz-snapshotter/NOTICE.md
+++ /dev/null
@@ -1,67 +0,0 @@
-The source code developed under the Stargz Snapshotter Project is licensed under Apache License 2.0.
-
-However, the Stargz Snapshotter project contains modified subcomponents from Container Registry Filesystem Project with separate copyright notices and license terms.
Your use of the source code for the subcomponent is subject to the terms and conditions as defined by the source project. Files in these subcomponents contain following file header. - -``` -Copyright 2019 The Go Authors. All rights reserved. -Use of this source code is governed by a BSD-style -license that can be found in the NOTICE.md file. -``` - -These source code is governed by a 3-Clause BSD license. The copyright notice, list of conditions and disclaimer are the following. - -``` -Copyright (c) 2019 Google LLC. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -``` - -The Stargz Snapshotter project also contains modified benchmarking code from HelloBench Project with separate copyright notices and license terms. Your use of the source code for the benchmarking code is subject to the terms and conditions as defined by the source project. These source code is governed by a MIT license. The copyright notice, condition and disclaimer are the following. The file in the benchmarking code contains it as the file header. - -``` -The MIT License (MIT) - -Copyright (c) 2015 Tintri - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
-```
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
index 9ee97fc911..b071cea51d 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -26,10 +26,10 @@ import (
 	"archive/tar"
 	"bytes"
 	"compress/gzip"
+	"context"
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 	"runtime"
@@ -48,6 +48,8 @@ type options struct {
 	prioritizedFiles       []string
 	missedPrioritizedFiles *[]string
 	compression            Compression
+	ctx                    context.Context
+	minChunkSize           int
 }
 
 type Option func(o *options) error
@@ -62,6 +64,7 @@ func WithChunkSize(chunkSize int) Option {
 
 // WithCompressionLevel option specifies the gzip compression level.
 // The default is gzip.BestCompression.
+// This option will be ignored if WithCompression option is used.
 // See also: https://godoc.org/compress/gzip#pkg-constants
 func WithCompressionLevel(level int) Option {
 	return func(o *options) error {
@@ -104,6 +107,26 @@ func WithCompression(compression Compression) Option {
 	}
 }
 
+// WithContext specifies a context that can be used for clean cancellation.
+func WithContext(ctx context.Context) Option {
+	return func(o *options) error {
+		o.ctx = ctx
+		return nil
+	}
+}
+
+// WithMinChunkSize option specifies the minimal number of bytes of data
+// that must be written in one gzip stream.
+// By increasing this number, one gzip stream can contain multiple files,
+// which hopefully leads to a smaller result blob.
+// NOTE: This adds a TOC property that old readers don't understand.
+func WithMinChunkSize(minChunkSize int) Option {
+	return func(o *options) error {
+		o.minChunkSize = minChunkSize
+		return nil
+	}
+}
+
 // Blob is an eStargz blob.
 type Blob struct {
 	io.ReadCloser
@@ -139,12 +162,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 		opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
 	}
 	layerFiles := newTempFiles()
+	ctx := opts.ctx
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	done := make(chan struct{})
+	defer close(done)
+	go func() {
+		select {
+		case <-done:
+			// nop
+		case <-ctx.Done():
+			layerFiles.CleanupAll()
+		}
+	}()
 	defer func() {
 		if rErr != nil {
 			if err := layerFiles.CleanupAll(); err != nil {
 				rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
 			}
 		}
+		if cErr := ctx.Err(); cErr != nil {
+			rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
+		}
 	}()
 	tarBlob, err := decompressBlob(tarBlob, layerFiles)
 	if err != nil {
@@ -154,7 +194,14 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 	if err != nil {
 		return nil, err
 	}
-	tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
+	var tarParts [][]*entry
+	if opts.minChunkSize > 0 {
+		// Each entry needs to know the size of the current gzip stream so they
+		// cannot be processed in parallel.
+ tarParts = [][]*entry{entries} + } else { + tarParts = divideEntries(entries, runtime.GOMAXPROCS(0)) + } writers := make([]*Writer, len(tarParts)) payloads := make([]*os.File, len(tarParts)) var mu sync.Mutex @@ -169,6 +216,13 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { } sw := NewWriterWithCompressor(esgzFile, opts.compression) sw.ChunkSize = opts.chunkSize + sw.MinChunkSize = opts.minChunkSize + if sw.needsOpenGzEntries == nil { + sw.needsOpenGzEntries = make(map[string]struct{}) + } + for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} { + sw.needsOpenGzEntries[f] = struct{}{} + } if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { return err } @@ -183,7 +237,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { rErr = err return nil, err } - tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...) + tocAndFooter, tocDgst, err := closeWithCombine(writers...) if err != nil { rErr = err return nil, err @@ -226,7 +280,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { // Writers doesn't write TOC and footer to the underlying writers so they can be // combined into a single eStargz and tocAndFooter returned by this function can // be appended at the tail of that combined blob. -func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { +func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) { if len(ws) == 0 { return nil, "", fmt.Errorf("at least one writer must be passed") } @@ -369,7 +423,7 @@ func readerFromEntries(entries ...*entry) io.Reader { func importTar(in io.ReaderAt) (*tarFile, error) { tf := &tarFile{} - pw, err := newCountReader(in) + pw, err := newCountReadSeeker(in) if err != nil { return nil, fmt.Errorf("failed to make position watcher: %w", err) } @@ -506,12 +560,13 @@ func newTempFiles() *tempFiles { } type tempFiles struct { - files []*os.File - filesMu sync.Mutex + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once } func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { - f, err := ioutil.TempFile(dir, pattern) + f, err := os.CreateTemp(dir, pattern) if err != nil { return nil, err } @@ -521,7 +576,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { return f, nil } -func (tf *tempFiles) CleanupAll() error { +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { tf.filesMu.Lock() defer tf.filesMu.Unlock() var allErr []error @@ -537,19 +599,19 @@ func (tf *tempFiles) CleanupAll() error { return errorutil.Aggregate(allErr) } -func newCountReader(r io.ReaderAt) (*countReader, error) { +func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) { pos := int64(0) - return &countReader{r: r, cPos: &pos}, nil + return &countReadSeeker{r: r, cPos: &pos}, nil } -type countReader struct { +type countReadSeeker struct { r io.ReaderAt cPos *int64 mu sync.Mutex } -func (cr *countReader) Read(p []byte) (int, error) { +func (cr *countReadSeeker) Read(p []byte) (int, error) { cr.mu.Lock() defer cr.mu.Unlock() @@ -560,7 +622,7 @@ func (cr *countReader) Read(p []byte) (int, error) { return n, err } -func (cr *countReader) Seek(offset int64, whence int) (int64, error) { +func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) { cr.mu.Lock() defer 
cr.mu.Unlock() @@ -581,7 +643,7 @@ func (cr *countReader) Seek(offset int64, whence int) (int64, error) { return offset, nil } -func (cr *countReader) currentPos() int64 { +func (cr *countReadSeeker) currentPos() int64 { cr.mu.Lock() defer cr.mu.Unlock() diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index 4b655c1453..f4d5546558 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -31,7 +31,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "os" "path" "sort" @@ -151,10 +150,10 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { allErr = append(allErr, err) continue } - if tocSize <= 0 { + if tocOffset >= 0 && tocSize <= 0 { tocSize = sr.Size() - tocOffset - fSize } - if tocSize < int64(len(maybeTocBytes)) { + if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) { maybeTocBytes = maybeTocBytes[:tocSize] } r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) @@ -208,8 +207,16 @@ func (r *Reader) initFields() error { uname := map[int]string{} gname := map[int]string{} var lastRegEnt *TOCEntry - for _, ent := range r.toc.Entries { + var chunkTopIndex int + for i, ent := range r.toc.Entries { ent.Name = cleanEntryName(ent.Name) + switch ent.Type { + case "reg", "chunk": + if ent.Offset != r.toc.Entries[chunkTopIndex].Offset { + chunkTopIndex = i + } + ent.chunkTopIndex = chunkTopIndex + } if ent.Type == "reg" { lastRegEnt = ent } @@ -295,7 +302,7 @@ func (r *Reader) initFields() error { if e.isDataType() { e.nextOffset = lastOffset } - if e.Offset != 0 { + if e.Offset != 0 && e.InnerOffset == 0 { lastOffset = e.Offset } } @@ -489,6 +496,14 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { // // Name must be absolute path or one that is relative to root. 
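For orientation before the reworked OpenFile path that follows: the reader side is used the same way as before, with all of the InnerOffset bookkeeping hidden behind fileReader.ReadAt. A minimal, hedged read-side sketch; the on-disk blob name and entry path are hypothetical, and gzip is assumed as the default decompressor:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	blob, err := os.ReadFile("layer.esgz") // hypothetical eStargz blob on disk
	if err != nil {
		panic(err)
	}
	r, err := estargz.Open(io.NewSectionReader(bytes.NewReader(blob), 0, int64(len(blob))))
	if err != nil {
		panic(err)
	}
	sr, err := r.OpenFile("foo/bar.txt") // hypothetical entry name
	if err != nil {
		panic(err)
	}
	// Random access: only the chunks covering [0,16) need to be decompressed.
	buf := make([]byte, 16)
	n, err := sr.ReadAt(buf, 0)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		panic(err)
	}
	fmt.Printf("%q\n", buf[:n])
}
```

The reworked OpenFile implementation follows.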
func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) newFileReader(name string) (*fileReader, error) { name = cleanEntryName(name) ent, ok := r.Lookup(name) if !ok { @@ -506,11 +521,19 @@ func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { Err: errors.New("not a regular file"), } } - fr := &fileReader{ + return &fileReader{ r: r, size: ent.Size, ents: r.getChunks(ent), + }, nil +} + +func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) { + fr, err := r.newFileReader(name) + if err != nil { + return nil, err } + fr.preRead = preRead return io.NewSectionReader(fr, 0, fr.size), nil } @@ -522,9 +545,10 @@ func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { } type fileReader struct { - r *Reader - size int64 - ents []*TOCEntry // 1 or more reg/chunk entries + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries + preRead func(*TOCEntry, io.Reader) error } func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { @@ -579,10 +603,48 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) } defer dr.Close() - if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { - return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) + + if fr.preRead == nil { + if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil { + return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err) + } + return io.ReadFull(dr, p) } - return io.ReadFull(dr, p) + + var retN int + var retErr error + var found bool + var nr int64 + for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] { + if !e.isDataType() { + continue + } + if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset { + break + } + if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr { + return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err) + } + nr = e.InnerOffset + if e == ent { + found = true + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err) + } + retN, retErr = io.ReadFull(dr, p) + nr += off + int64(retN) + continue + } + cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)} + if err := fr.preRead(e, cr); err != nil { + return 0, fmt.Errorf("failed to pre read: %w", err) + } + nr += cr.n + } + if !found { + return 0, fmt.Errorf("fileReader.ReadAt: target entry not found") + } + return retN, retErr } // A Writer writes stargz files. @@ -600,11 +662,20 @@ type Writer struct { lastGroupname map[int]string compressor Compressor + uncompressedCounter *countWriteFlusher + // ChunkSize optionally controls the maximum number of bytes // of data of a regular file that can be written in one gzip // stream before a new gzip stream is started. // Zero means to use a default, currently 4 MiB. ChunkSize int + + // MinChunkSize optionally controls the minimum number of bytes + // of data that must be written in one gzip stream before a new gzip + // stream is started. + // NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand.
+ MinChunkSize int + + needsOpenGzEntries map[string]struct{} } // currentCompressionWriter writes to the current w.gz field, which can @@ -647,6 +718,9 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { if err != nil { return nil, fmt.Errorf("failed to parse footer: %w", err) } + if blobPayloadSize < 0 { + blobPayloadSize = sr.Size() + } return c.Reader(io.LimitReader(sr, blobPayloadSize)) } @@ -673,11 +747,12 @@ func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer { bw := bufio.NewWriter(w) cw := &countWriter{w: bw} return &Writer{ - bw: bw, - cw: cw, - toc: &JTOC{Version: 1}, - diffHash: sha256.New(), - compressor: c, + bw: bw, + cw: cw, + toc: &JTOC{Version: 1}, + diffHash: sha256.New(), + compressor: c, + uncompressedCounter: &countWriteFlusher{}, } } @@ -718,6 +793,20 @@ func (w *Writer) closeGz() error { return nil } +func (w *Writer) flushGz() error { + if w.closed { + return errors.New("flush on closed Writer") + } + if w.gz != nil { + if f, ok := w.gz.(interface { + Flush() error + }); ok { + return f.Flush() + } + } + return nil +} + // nameIfChanged returns name, unless it was the already the value of (*mp)[id], // in which case it returns the empty string. func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { @@ -737,6 +826,9 @@ func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { func (w *Writer) condOpenGz() (err error) { if w.gz == nil { w.gz, err = w.compressor.Writer(w.cw) + if w.gz != nil { + w.gz = w.uncompressedCounter.register(w.gz) + } } return } @@ -785,6 +877,8 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { if lossless { tr.RawAccounting = true } + prevOffset := w.cw.n + var prevOffsetUncompressed int64 for { h, err := tr.Next() if err == io.EOF { @@ -884,10 +978,6 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { totalSize := ent.Size // save it before we destroy ent tee := io.TeeReader(tr, payloadDigest.Hash()) for written < totalSize { - if err := w.closeGz(); err != nil { - return err - } - chunkSize := int64(w.chunkSize()) remain := totalSize - written if remain < chunkSize { @@ -895,7 +985,23 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } else { ent.ChunkSize = chunkSize } - ent.Offset = w.cw.n + + // We flush the underlying compression writer here to correctly calculate "w.cw.n". + if err := w.flushGz(); err != nil { + return err + } + if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) { + if err := w.closeGz(); err != nil { + return err + } + ent.Offset = w.cw.n + prevOffset = ent.Offset + prevOffsetUncompressed = w.uncompressedCounter.n + } else { + ent.Offset = prevOffset + ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed + } + ent.ChunkOffset = written chunkDigest := digest.Canonical.Digester() @@ -933,7 +1039,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } } } - remainDest := ioutil.Discard + remainDest := io.Discard if lossless { remainDest = dst // Preserve the remaining bytes in lossless mode } @@ -941,6 +1047,17 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { return err } +func (w *Writer) needsOpenGz(ent *TOCEntry) bool { + if ent.Type != "reg" { + return false + } + if w.needsOpenGzEntries == nil { + return false + } + _, ok := w.needsOpenGzEntries[ent.Name] + return ok +} + // DiffID returns the SHA-256 of the uncompressed tar bytes. // It is only valid to call DiffID after Close. 
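Pulling the new writer knobs together before the DiffID accessor below, a hedged end-to-end build sketch; the input layer.tar is hypothetical and the sizes are only illustrative (64000 mirrors the value used by the tests in this diff):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	tarBytes, err := os.ReadFile("layer.tar") // hypothetical input tar
	if err != nil {
		panic(err)
	}
	blob, err := estargz.Build(
		io.NewSectionReader(bytes.NewReader(tarBytes), 0, int64(len(tarBytes))),
		estargz.WithChunkSize(4<<20),    // cap: split big files into 4 MiB chunks
		estargz.WithMinChunkSize(64000), // floor: pack small entries into one shared gzip stream
	)
	if err != nil {
		panic(err)
	}
	var out bytes.Buffer
	if _, err := io.Copy(&out, blob); err != nil {
		panic(err)
	}
	blob.Close()
	// TOCDigest feeds later verification; DiffID is the uncompressed tar digest.
	fmt.Println("TOC digest:", blob.TOCDigest(), "DiffID:", blob.DiffID())
}
```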
func (w *Writer) DiffID() string { @@ -957,6 +1074,28 @@ func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) { } func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { + if tocOff < 0 { + // This means that TOC isn't contained in the blob. + // We pass nil reader to ParseTOC and expect that ParseTOC acquire TOC from + // the external location. + start := time.Now() + toc, tocDgst, err := d.ParseTOC(nil) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } if len(tocBytes) > 0 { start := time.Now() toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) @@ -1022,6 +1161,37 @@ func (cw *countWriter) Write(p []byte) (n int, err error) { return } +type countWriteFlusher struct { + io.WriteCloser + n int64 +} + +func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser { + wc.WriteCloser = w + return wc +} + +func (wc *countWriteFlusher) Write(p []byte) (n int, err error) { + n, err = wc.WriteCloser.Write(p) + wc.n += int64(n) + return +} + +func (wc *countWriteFlusher) Flush() error { + if f, ok := wc.WriteCloser.(interface { + Flush() error + }); ok { + return f.Flush() + } + return nil +} + +func (wc *countWriteFlusher) Close() error { + err := wc.WriteCloser.Close() + wc.WriteCloser = nil + return err +} + // isGzip reports whether br is positioned right before an upcoming gzip stream. // It does not consume any bytes from br. func isGzip(br *bufio.Reader) bool { @@ -1040,3 +1210,14 @@ func positive(n int64) int64 { } return n } + +type countReader struct { + r io.Reader + n int64 +} + +func (cr *countReader) Read(p []byte) (n int, err error) { + n, err = cr.r.Read(p) + cr.n += int64(n) + return +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go index 591d7a62e1..f24afe32f4 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -60,7 +60,7 @@ type GzipCompressor struct { compressionLevel int } -func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { +func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) { return gzip.NewWriterLevel(w, gc.compressionLevel) } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 1de13a4705..0ca6fd75f2 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -31,8 +31,9 @@ import ( "errors" "fmt" "io" - "io/ioutil" + "math/rand" "os" + "path/filepath" "reflect" "sort" "strings" @@ -44,21 +45,27 @@ import ( digest "github.com/opencontainers/go-digest" ) +func init() { + rand.Seed(time.Now().UnixNano()) +} + // TestingController is Compression with some helper methods necessary for testing. 
type TestingController interface { Compression - CountStreams(*testing.T, []byte) int + TestStreams(t *testing.T, b []byte, streams []int64) DiffIDOf(*testing.T, []byte) string String() string } // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. -func CompressionTestSuite(t *testing.T, controllers ...TestingController) { +func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) { t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) }) } +type TestingControllerFactory func() TestingController + const ( uncompressedType int = iota gzipType @@ -75,11 +82,12 @@ var allowedPrefix = [4]string{"", "./", "/", "../"} // testBuild tests the resulting stargz blob built by this pkg has the same // contents as the normal stargz blob. -func testBuild(t *testing.T, controllers ...TestingController) { +func testBuild(t *testing.T, controllers ...TestingControllerFactory) { tests := []struct { - name string - chunkSize int - in []tarEntry + name string + chunkSize int + minChunkSize []int + in []tarEntry }{ { name: "regfiles and directories", @@ -108,11 +116,14 @@ func testBuild(t *testing.T, controllers ...TestingController) { ), }, { - name: "various files", - chunkSize: 4, + name: "various files", + chunkSize: 4, + minChunkSize: []int{0, 64000}, in: tarOf( file("baz.txt", "bazbazbazbazbazbazbaz"), - file("foo.txt", "a"), + file("foo1.txt", "a"), + file("bar/foo2.txt", "b"), + file("foo3.txt", "c"), symlink("barlink", "test/bar.txt"), dir("test/"), dir("dev/"), @@ -144,99 +155,112 @@ func testBuild(t *testing.T, controllers ...TestingController) { }, } for _, tt := range tests { + if len(tt.minChunkSize) == 0 { + tt.minChunkSize = []int{0} + } for _, srcCompression := range srcCompressions { srcCompression := srcCompression - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, prefix := range allowedPrefix { prefix := prefix - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) { - tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) - // Test divideEntries() - entries, err := sortEntries(tarBlob, nil, nil) // identical order - if err != nil { - t.Fatalf("failed to parse tar: %v", err) - } - var merged []*entry - for _, part := range divideEntries(entries, 4) { - merged = append(merged, part...) - } - if !reflect.DeepEqual(entries, merged) { - for _, e := range entries { - t.Logf("Original: %v", e.header) + for _, minChunkSize := range tt.minChunkSize { + minChunkSize := minChunkSize + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) { + tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) } - for _, e := range merged { - t.Logf("Merged: %v", e.header) + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) 
+ } + if !reflect.DeepEqual(entries, merged) { + for _, e := range entries { + t.Logf("Original: %v", e.header) + } + for _, e := range merged { + t.Logf("Merged: %v", e.header) + } + t.Errorf("divided entries couldn't be merged") + return } - t.Errorf("divided entries couldn't be merged") - return - } - // Prepare sample data - wantBuf := new(bytes.Buffer) - sw := NewWriterWithCompressor(wantBuf, cl) - sw.ChunkSize = tt.chunkSize - if err := sw.AppendTar(tarBlob); err != nil { - t.Fatalf("failed to append tar to want stargz: %v", err) - } - if _, err := sw.Close(); err != nil { - t.Fatalf("failed to prepare want stargz: %v", err) - } - wantData := wantBuf.Bytes() - want, err := Open(io.NewSectionReader( - bytes.NewReader(wantData), 0, int64(len(wantData))), - WithDecompressors(cl), - ) - if err != nil { - t.Fatalf("failed to parse the want stargz: %v", err) - } + // Prepare sample data + cl1 := newCL() + wantBuf := new(bytes.Buffer) + sw := NewWriterWithCompressor(wantBuf, cl1) + sw.MinChunkSize = minChunkSize + sw.ChunkSize = tt.chunkSize + if err := sw.AppendTar(tarBlob); err != nil { + t.Fatalf("failed to append tar to want stargz: %v", err) + } + if _, err := sw.Close(); err != nil { + t.Fatalf("failed to prepare want stargz: %v", err) + } + wantData := wantBuf.Bytes() + want, err := Open(io.NewSectionReader( + bytes.NewReader(wantData), 0, int64(len(wantData))), + WithDecompressors(cl1), + ) + if err != nil { + t.Fatalf("failed to parse the want stargz: %v", err) + } - // Prepare testing data - rc, err := Build(compressBlob(t, tarBlob, srcCompression), - WithChunkSize(tt.chunkSize), WithCompression(cl)) - if err != nil { - t.Fatalf("failed to build stargz: %v", err) - } - defer rc.Close() - gotBuf := new(bytes.Buffer) - if _, err := io.Copy(gotBuf, rc); err != nil { - t.Fatalf("failed to copy built stargz blob: %v", err) - } - gotData := gotBuf.Bytes() - got, err := Open(io.NewSectionReader( - bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), - WithDecompressors(cl), - ) - if err != nil { - t.Fatalf("failed to parse the got stargz: %v", err) - } + // Prepare testing data + var opts []Option + if minChunkSize > 0 { + opts = append(opts, WithMinChunkSize(minChunkSize)) + } + cl2 := newCL() + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...) 
+ if err != nil { + t.Fatalf("failed to build stargz: %v", err) + } + defer rc.Close() + gotBuf := new(bytes.Buffer) + if _, err := io.Copy(gotBuf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + gotData := gotBuf.Bytes() + got, err := Open(io.NewSectionReader( + bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), + WithDecompressors(cl2), + ) + if err != nil { + t.Fatalf("failed to parse the got stargz: %v", err) + } - // Check DiffID is properly calculated - rc.Close() - diffID := rc.DiffID() - wantDiffID := cl.DiffIDOf(t, gotData) - if diffID.String() != wantDiffID { - t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) - } + // Check DiffID is properly calculated + rc.Close() + diffID := rc.DiffID() + wantDiffID := cl2.DiffIDOf(t, gotData) + if diffID.String() != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } - // Compare as stargz - if !isSameVersion(t, cl, wantData, gotData) { - t.Errorf("built stargz hasn't same json") - return - } - if !isSameEntries(t, want, got) { - t.Errorf("built stargz isn't same as the original") - return - } + // Compare as stargz + if !isSameVersion(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz hasn't same json") + return + } + if !isSameEntries(t, want, got) { + t.Errorf("built stargz isn't same as the original") + return + } - // Compare as tar.gz - if !isSameTarGz(t, cl, wantData, gotData) { - t.Errorf("built stargz isn't same tar.gz") - return - } - }) + // Compare as tar.gz + if !isSameTarGz(t, cl1, wantData, cl2, gotData) { + t.Errorf("built stargz isn't same tar.gz") + return + } + }) + } } } } @@ -244,13 +268,13 @@ func testBuild(t *testing.T, controllers ...TestingController) { } } -func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { - aGz, err := controller.Reader(bytes.NewReader(a)) +func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aGz, err := cla.Reader(bytes.NewReader(a)) if err != nil { t.Fatalf("failed to read A") } defer aGz.Close() - bGz, err := controller.Reader(bytes.NewReader(b)) + bGz, err := clb.Reader(bytes.NewReader(b)) if err != nil { t.Fatalf("failed to read B") } @@ -287,11 +311,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return false } - aFile, err := ioutil.ReadAll(aTar) + aFile, err := io.ReadAll(aTar) if err != nil { t.Fatal("failed to read tar payload of A") } - bFile, err := ioutil.ReadAll(bTar) + bFile, err := io.ReadAll(bTar) if err != nil { t.Fatal("failed to read tar payload of B") } @@ -304,12 +328,12 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return true } -func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool { - aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller) +func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { + aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla) if err != nil { t.Fatalf("failed to parse A: %v", err) } - bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller) + bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb) if err != nil { t.Fatalf("failed to parse B: %v", err) } @@ -463,7 +487,7 @@ func equalEntry(a, b *TOCEntry) bool { a.GID == b.GID && a.Uname == b.Uname && a.Gname == b.Gname && - (a.Offset > 0) 
== (b.Offset > 0) && + (a.Offset >= 0) == (b.Offset >= 0) && (a.NextOffset() > 0) == (b.NextOffset() > 0) && a.DevMajor == b.DevMajor && a.DevMinor == b.DevMinor && @@ -510,14 +534,15 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { const chunkSize = 3 // type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) -type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) // testDigestAndVerify runs specified checks against sample stargz blobs. -func testDigestAndVerify(t *testing.T, controllers ...TestingController) { +func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { tests := []struct { - name string - tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) - checks []check + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + minChunkSize []int }{ { name: "no-regfile", @@ -544,6 +569,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { regDigest(t, "test/bar.txt", "bbb", dgstMap), ) }, + minChunkSize: []int{0, 64000}, checks: []check{ checkStargzTOC, checkVerifyTOC, @@ -581,11 +607,14 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { }, }, { - name: "with-non-regfiles", + name: "with-non-regfiles", + minChunkSize: []int{0, 64000}, tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), + regDigest(t, "bar/foo2.txt", "b", dgstMap), + regDigest(t, "foo3.txt", "c", dgstMap), symlink("barlink", "test/bar.txt"), dir("test/"), regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), @@ -599,6 +628,8 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { checkVerifyInvalidStargzFail(buildTar(t, tarOf( file("baz.txt", "bazbazbazbazbazbazbaz"), file("foo.txt", "a"), + file("bar/foo2.txt", "b"), + file("foo3.txt", "c"), symlink("barlink", "test/bar.txt"), dir("test/"), file("test/bar.txt", "testbartestbar"), @@ -612,38 +643,45 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { } for _, tt := range tests { + if len(tt.minChunkSize) == 0 { + tt.minChunkSize = []int{0} + } for _, srcCompression := range srcCompressions { srcCompression := srcCompression - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, prefix := range allowedPrefix { prefix := prefix for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) { - // Get original tar file and chunk digests - dgstMap := make(map[string]digest.Digest) - tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) + for _, minChunkSize := range tt.minChunkSize { + minChunkSize := minChunkSize + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) { + // Get original tar file and chunk digests + dgstMap := make(map[string]digest.Digest) + tarBlob := 
buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) - rc, err := Build(compressBlob(t, tarBlob, srcCompression), - WithChunkSize(chunkSize), WithCompression(cl)) - if err != nil { - t.Fatalf("failed to convert stargz: %v", err) - } - tocDigest := rc.TOCDigest() - defer rc.Close() - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, rc); err != nil { - t.Fatalf("failed to copy built stargz blob: %v", err) - } - newStargz := buf.Bytes() - // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. - dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) + cl := newCL() + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + tocDigest := rc.TOCDigest() + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + newStargz := buf.Bytes() + // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. + dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) - for _, check := range tt.checks { - check(t, newStargz, tocDigest, dgstMap, cl) - } - }) + for _, check := range tt.checks { + check(t, newStargz, tocDigest, dgstMap, cl, newCL) + } + }) + } } } } @@ -654,7 +692,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) { // checkStargzTOC checks the TOC JSON of the passed stargz has the expected // digest and contains valid chunks. It walks all entries in the stargz and // checks all chunk digests stored to the TOC JSON match the actual contents. -func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { +func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -765,7 +803,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyTOC checks the verification works for the TOC JSON of the passed // stargz. It walks all entries in the stargz and checks the verifications for // all chunks work. -func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { +func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -846,7 +884,7 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be // detected during the verification and the verification returns an error. 
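Before the failure-path helper below, the happy-path flow that these checks invert looks roughly like the following. This is a hedged sketch, assuming the blob b and its build-time tocDigest are at hand and that name fits in a single chunk (a multi-chunk file needs one verifier per chunk entry):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
	digest "github.com/opencontainers/go-digest"
)

func verifyOneFile(b []byte, tocDigest digest.Digest, name string) error {
	sgz, err := estargz.Open(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))))
	if err != nil {
		return err
	}
	ev, err := sgz.VerifyTOC(tocDigest) // rejects a tampered TOC JSON
	if err != nil {
		return err
	}
	ent, ok := sgz.Lookup(name)
	if !ok {
		return fmt.Errorf("%q not found in TOC", name)
	}
	v, err := ev.Verifier(ent) // digest verifier backed by the TOC's chunkDigest
	if err != nil {
		return err
	}
	sr, err := sgz.OpenFile(name)
	if err != nil {
		return err
	}
	if _, err := io.Copy(v, sr); err != nil {
		return err
	}
	if !v.Verified() {
		return fmt.Errorf("contents of %q do not match the TOC digest", name)
	}
	return nil
}

func main() {} // sketch only; wire b/tocDigest/name in from a real build
```

The failure-path helper itself follows.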
func checkVerifyInvalidTOCEntryFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { funcs := map[string]rewriteFunc{ "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { var found bool @@ -920,8 +958,9 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { // checkVerifyInvalidStargzFail checks if the verification detects that the // given stargz file doesn't match to the expected digest and returns error. func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { - rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller)) + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + cl := newController() + rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl)) if err != nil { t.Fatalf("failed to convert stargz: %v", err) } @@ -934,7 +973,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { sgz, err := Open( io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))), - WithDecompressors(controller), + WithDecompressors(cl), ) if err != nil { t.Fatalf("failed to parse converted stargz: %v", err) @@ -951,7 +990,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { // checkVerifyBrokenContentFail checks if the verifier detects broken contents // that doesn't match to the expected digest and returns error. 
func checkVerifyBrokenContentFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { // Parse stargz file sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), @@ -1070,7 +1109,10 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT } // Decode the TOC JSON - tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + var tocReader io.Reader + if tocOffset >= 0 { + tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + } decodedJTOC, _, err = controller.ParseTOC(tocReader) if err != nil { return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) @@ -1078,28 +1120,31 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT return decodedJTOC, tocOffset, nil } -func testWriteAndOpen(t *testing.T, controllers ...TestingController) { +func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { const content = "Some contents" invalidUtf8 := "\xff\xfe\xfd" xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} sampleOwner := owner{uid: 50, gid: 100} + data64KB := randomContents(64000) + tests := []struct { - name string - chunkSize int - in []tarEntry - want []stargzCheck - wantNumGz int // expected number of streams + name string + chunkSize int + minChunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz wantFailOnLossLess bool + wantTOCVersion int // default = 1 }{ { - name: "empty", - in: tarOf(), - wantNumGz: 2, // empty tar + TOC + footer - wantNumGzLossLess: 3, // empty tar + TOC + footer + name: "empty", + in: tarOf(), + wantNumGz: 2, // (empty tar) + TOC + footer want: checks( numTOCEntries(0), ), @@ -1195,7 +1240,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { dir("foo/"), file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), ), - wantNumGz: 9, + wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer want: checks( numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file hasDir("foo/"), @@ -1314,23 +1359,120 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { ), wantFailOnLossLess: true, }, + { + name: "hardlink should be replaced to the destination entry", + in: tarOf( + dir("foo/"), + file("foo/foo1", "test"), + link("foolink", "foo/foo1"), + ), + wantNumGz: 4, // dir, foo1 + link, TOC, footer + want: checks( + mustSameEntry("foo/foo1", "foolink"), + ), + }, + { + name: "several_files_in_chunk", + minChunkSize: 8000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + file("foo22", "ccc"), + dir("bar/"), + file("bar/bar.txt", "aaa"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed "data64KB" is still larger than 8KB + wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3 + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo22", len("ccc")), + hasFileLen("bar/bar.txt", len("aaa")), + hasFileLen("foo3", 
len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo22", digestFor("ccc")), + hasFileDigest("bar/bar.txt", digestFor("aaa")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo22", 0, "ccc"), + hasFileContentsRange("foo22", 1, "cc"), + hasFileContentsRange("foo22", 2, "c"), + hasFileContentsRange("bar/bar.txt", 0, "aaa"), + hasFileContentsRange("bar/bar.txt", 1, "aa"), + hasFileContentsRange("bar/bar.txt", 2, "a"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, + { + name: "several_files_in_chunk_chunked", + minChunkSize: 8000, + chunkSize: 32000, + in: tarOf( + dir("foo/"), + file("foo/foo1", data64KB), + file("foo2", "bb"), + dir("bar/"), + file("foo3", data64KB), + ), + // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB + wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer + want: checks( + numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks) + hasDir("foo/"), + hasDir("bar/"), + hasFileLen("foo/foo1", len(data64KB)), + hasFileLen("foo2", len("bb")), + hasFileLen("foo3", len(data64KB)), + hasFileDigest("foo/foo1", digestFor(data64KB)), + hasFileDigest("foo2", digestFor("bb")), + hasFileDigest("foo3", digestFor(data64KB)), + hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}), + hasFileContentsRange("foo/foo1", 0, data64KB), + hasFileContentsRange("foo/foo1", 1, data64KB[1:]), + hasFileContentsRange("foo/foo1", 2, data64KB[2:]), + hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]), + hasFileContentsRange("foo2", 0, "bb"), + hasFileContentsRange("foo2", 1, "b"), + hasFileContentsRange("foo3", 0, data64KB), + hasFileContentsRange("foo3", 1, data64KB[1:]), + hasFileContentsRange("foo3", 2, data64KB[2:]), + hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]), + hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]), + ), + }, } for _, tt := range tests { - for _, cl := range controllers { - cl := cl + for _, newCL := range controllers { + newCL := newCL for _, prefix := range allowedPrefix { prefix := prefix for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, lossless := range []bool{true, false} { - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) origTarDgstr := digest.Canonical.Digester() tr = io.TeeReader(tr, origTarDgstr.Hash()) var stargzBuf bytes.Buffer - w := NewWriterWithCompressor(&stargzBuf, cl) + cl1 := newCL() + w := NewWriterWithCompressor(&stargzBuf, cl1) 
w.ChunkSize = tt.chunkSize + w.MinChunkSize = tt.minChunkSize if lossless { err := w.AppendTarLossLess(tr) if tt.wantFailOnLossLess { @@ -1354,7 +1496,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { if lossless { // Check if the result blob reserves original tar metadata - rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl) + rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1) if err != nil { t.Errorf("failed to decompress blob: %v", err) return @@ -1373,32 +1515,71 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { } diffID := w.DiffID() - wantDiffID := cl.DiffIDOf(t, b) + wantDiffID := cl1.DiffIDOf(t, b) if diffID != wantDiffID { t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) } - got := cl.CountStreams(t, b) - wantNumGz := tt.wantNumGz - if lossless && tt.wantNumGzLossLess > 0 { - wantNumGz = tt.wantNumGzLossLess - } - if got != wantNumGz { - t.Errorf("number of streams = %d; want %d", got, wantNumGz) - } - telemetry, checkCalled := newCalledTelemetry() + sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))) r, err := Open( - io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), - WithDecompressors(cl), + sr, + WithDecompressors(cl1), WithTelemetry(telemetry), ) if err != nil { t.Fatalf("stargz.Open: %v", err) } - if err := checkCalled(); err != nil { + wantTOCVersion := 1 + if tt.wantTOCVersion > 0 { + wantTOCVersion = tt.wantTOCVersion + } + if r.toc.Version != wantTOCVersion { + t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion) + } + + footerSize := cl1.FooterSize() + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + t.Errorf("failed to read footer: %v", err) + } + _, tocOffset, _, err := cl1.ParseFooter(footer) + if err != nil { + t.Errorf("failed to parse footer: %v", err) + } + if err := checkCalled(tocOffset >= 0); err != nil { t.Errorf("telemetry failure: %v", err) } + + wantNumGz := tt.wantNumGz + if lossless && tt.wantNumGzLossLess > 0 { + wantNumGz = tt.wantNumGzLossLess + } + streamOffsets := []int64{0} + prevOffset := int64(-1) + streams := 0 + for _, e := range r.toc.Entries { + if e.Offset > prevOffset { + streamOffsets = append(streamOffsets, e.Offset) + prevOffset = e.Offset + streams++ + } + } + streams++ // TOC + if tocOffset >= 0 { + // toc is in the blob + streamOffsets = append(streamOffsets, tocOffset) + } + streams++ // footer + streamOffsets = append(streamOffsets, footerOffset) + if streams != wantNumGz { + t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz) + } + + t.Logf("testing streams: %+v", streamOffsets) + cl1.TestStreams(t, b, streamOffsets) + for _, want := range tt.want { want.check(t, r) } @@ -1410,7 +1591,12 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) { } } -func newCalledTelemetry() (telemetry *Telemetry, check func() error) { +type chunkInfo struct { + name string + data string +} + +func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) { var getFooterLatencyCalled bool var getTocLatencyCalled bool var deserializeTocLatencyCalled bool @@ -1418,13 +1604,15 @@ func newCalledTelemetry() (telemetry *Telemetry, check func() error) { func(time.Time) { getFooterLatencyCalled = true }, func(time.Time) { getTocLatencyCalled = true }, func(time.Time) { deserializeTocLatencyCalled = true }, - }, func() error { + }, func(needsGetTOC bool) 
error { var allErr []error if !getFooterLatencyCalled { allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) } - if !getTocLatencyCalled { - allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + if needsGetTOC { + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } } if !deserializeTocLatencyCalled { allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) @@ -1561,6 +1749,53 @@ func hasFileDigest(file string, digest string) stargzCheck { }) } +func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + extraMap := make(map[string]chunkInfo) + for _, e := range extra { + extraMap[e.name] = e + } + var extraNames []string + for n := range extraMap { + extraNames = append(extraNames, n) + } + f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error { + t.Logf("On %q: got preread of %q", file, e.Name) + ex, ok := extraMap[e.Name] + if !ok { + t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames) + } + got, err := io.ReadAll(cr) + if err != nil { + t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err) + } + if ex.data != string(got) { + t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data)) + } + delete(extraMap, e.Name) + return nil + }) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) + } + if len(extraMap) != 0 { + var exNames []string + for _, ex := range extraMap { + exNames = append(exNames, ex.name) + } + t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames) + } + }) +} + func hasFileContentsRange(file string, offset int, want string) stargzCheck { return stargzCheckFn(func(t *testing.T, r *Reader) { f, err := r.OpenFile(file) @@ -1573,7 +1808,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck { t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) } if string(got) != want { - t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want) + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want))) } }) } @@ -1731,6 +1966,67 @@ func hasEntryOwner(entry string, owner owner) stargzCheck { }) } +func mustSameEntry(files ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + var first *TOCEntry + for _, f := range files { + if first == nil { + var ok bool + first, ok = r.Lookup(f) + if !ok { + t.Errorf("unknown first file on Lookup: %q", f) + return + } + } + + // Test Lookup + e, ok := r.Lookup(f) + if !ok { + t.Errorf("unknown file on Lookup: %q", f) + return + } + if e != first { + t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test LookupChild + pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get parent of %q", f) + return + } + e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get %q as the child of %+v", f, pe) + return + } + if e != first { + t.Errorf("LookupChild: 
%+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test ForeachChild + pe.ForeachChild(func(baseName string, e *TOCEntry) bool { + if baseName == filepath.Base(filepath.Clean(f)) { + if e != first { + t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first) + return false + } + } + return true + }) + } + }) +} + +func viewContent(c []byte) string { + if len(c) < 100 { + return string(c) + } + return string(c[:50]) + "...(omit)..." + string(c[50:100]) +} + func tarOf(s ...tarEntry) []tarEntry { return s } type tarEntry interface { @@ -1990,6 +2286,16 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[strin }) } +var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randomContents(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = runes[rand.Intn(len(runes))] + } + return string(b) +} + func fileModeToTarMode(mode os.FileMode) (int64, error) { h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") if err != nil { @@ -2007,3 +2313,54 @@ func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } func (f fileInfoOnlyMode) Sys() interface{} { return nil } + +func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { + if len(streams) == 0 { + return // nop + } + + wants := map[int64]struct{}{} + for _, s := range streams { + wants[s] = struct{}{} + } + + len0 := len(b) + br := bytes.NewReader(b) + zr := new(gzip.Reader) + t.Logf("got gzip streams:") + numStreams := 0 + for { + zoff := len0 - br.Len() + if err := zr.Reset(br); err != nil { + if err == io.EOF { + return + } + t.Fatalf("countStreams(gzip), Reset: %v", err) + } + zr.Multistream(false) + n, err := io.Copy(io.Discard, zr) + if err != nil { + t.Fatalf("countStreams(gzip), Copy: %v", err) + } + var extra string + if len(zr.Header.Extra) > 0 { + extra = fmt.Sprintf("; extra=%q", zr.Header.Extra) + } + t.Logf(" [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra) + delete(wants, int64(zoff)) + numStreams++ + } +} + +func GzipDiffIDOf(t *testing.T, b []byte) string { + h := sha256.New() + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("diffIDOf(gzip): %v", err) + } + defer zr.Close() + if _, err := io.Copy(h, zr); err != nil { + t.Fatalf("diffIDOf(gzip).Copy: %v", err) + } + return fmt.Sprintf("sha256:%x", h.Sum(nil)) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go index 384ff7fd7f..57e0aa614e 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -149,6 +149,12 @@ type TOCEntry struct { // ChunkSize. Offset int64 `json:"offset,omitempty"` + // InnerOffset is an optional field indicates uncompressed offset + // of this "reg" or "chunk" payload in a stream starts from Offset. + // This field enables to put multiple "reg" or "chunk" payloads + // in one chunk with having the same Offset but different InnerOffset. + InnerOffset int64 `json:"innerOffset,omitempty"` + nextOffset int64 // the Offset of the next entry with a non-zero Offset // DevMajor is the major device number for "char" and "block" types. @@ -159,7 +165,8 @@ type TOCEntry struct { // NumLink is the number of entry names pointing to this entry. 
// Zero means one name references this entry. - NumLink int + // This field is calculated at runtime and is not recorded in TOC JSON. + NumLink int `json:"-"` // Xattrs are the extended attribute for the entry. Xattrs map[string][]byte `json:"xattrs,omitempty"` @@ -185,6 +192,9 @@ type TOCEntry struct { ChunkDigest string `json:"chunkDigest,omitempty"` children map[string]*TOCEntry + + // chunkTopIndex is the index of the entry where Offset starts in the blob. + chunkTopIndex int } // ModTime returns the entry's modification time. @@ -278,7 +288,10 @@ type Compressor interface { // Writer returns WriteCloser to be used for writing a chunk to eStargz. // Everytime a chunk is written, the WriteCloser is closed and Writer is // called again for writing the next chunk. - Writer(w io.Writer) (io.WriteCloser, error) + // + // The returned writer should implement a "Flush() error" method that flushes + // any pending compressed data to the underlying writer. + Writer(w io.Writer) (WriteFlushCloser, error) // WriteTOCAndFooter is called to write JTOC to the passed Writer. // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob @@ -302,8 +315,12 @@ type Decompressor interface { // payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between // the top until the TOC JSON). // - // Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range - // from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize). + // If tocOffset < 0, we assume that the TOC isn't contained in the blob and pass a nil reader + // to ParseTOC. We expect ParseTOC to acquire the TOC from an external location and return it. + // + // tocSize is optional. If tocSize <= 0, it's by default the size of the range from tocOffset until the beginning of the + // footer (blob size - tocOff - FooterSize). + // If blobPayloadSize < 0, blobPayloadSize becomes the blob size. ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) // ParseTOC parses TOC from the passed reader. The reader provides the partial contents @@ -312,5 +329,14 @@ type Decompressor interface { // This function returns tocDgst that represents the digest of TOC that will be used // to verify this blob. This must match to the value returned from // Compressor.WriteTOCAndFooter that is used when creating this blob. + // + // If tocOffset returned by ParseFooter is < 0, we assume that the TOC isn't contained in the blob. + // A nil reader is then passed to ParseTOC, which is expected to acquire the TOC from an external location + // and return it. ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) } + +type WriteFlushCloser interface { + io.WriteCloser + Flush() error +} diff --git a/vendor/github.com/in-toto/in-toto-golang/LICENSE b/vendor/github.com/in-toto/in-toto-golang/LICENSE new file mode 100644 index 0000000000..963ee949e8 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/LICENSE @@ -0,0 +1,13 @@ +Copyright 2018 New York University + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
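The types.go hunk above tightens the Compressor contract: Writer now returns a WriteFlushCloser, which lets the eStargz writer flush mid-stream and record Offset/InnerOffset pairs for entries that share one gzip stream. A small, hedged compile-time sketch of what that means for implementers, assuming only the interfaces shown in this diff (the TOC JSON in the comment is illustrative, not real output):

```go
package main

import (
	"compress/gzip"
	"fmt"

	"github.com/containerd/stargz-snapshotter/estargz"
)

// *gzip.Writer already provides Write, Close and Flush() error, so the stock
// gzip compressor satisfies the new return type without any changes.
var _ estargz.WriteFlushCloser = (*gzip.Writer)(nil)

func main() {
	// With MinChunkSize in effect, two TOC entries can share one gzip stream:
	// same "offset", distinct "innerOffset", e.g.:
	//   {"name":"foo2","type":"reg","size":2,"offset":123}
	//   {"name":"foo22","type":"reg","size":3,"offset":123,"innerOffset":2}
	fmt.Println("ok")
}
```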
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go new file mode 100644 index 0000000000..9b1de12b18 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go @@ -0,0 +1,156 @@ +package in_toto + +import ( + "crypto/x509" + "fmt" + "net/url" +) + +const ( + AllowAllConstraint = "*" +) + +// CertificateConstraint defines the attributes a certificate must have to act as a functionary. +// A wildcard `*` allows any value in the specified attribute, where as an empty array or value +// asserts that the certificate must have nothing for that attribute. A certificate must have +// every value defined in a constraint to match. +type CertificateConstraint struct { + CommonName string `json:"common_name"` + DNSNames []string `json:"dns_names"` + Emails []string `json:"emails"` + Organizations []string `json:"organizations"` + Roots []string `json:"roots"` + URIs []string `json:"uris"` +} + +// checkResult is a data structure used to hold +// certificate constraint errors +type checkResult struct { + errors []error +} + +// newCheckResult initializes a new checkResult +func newCheckResult() *checkResult { + return &checkResult{ + errors: make([]error, 0), + } +} + +// evaluate runs a constraint check on a certificate +func (cr *checkResult) evaluate(cert *x509.Certificate, constraintCheck func(*x509.Certificate) error) *checkResult { + err := constraintCheck(cert) + if err != nil { + cr.errors = append(cr.errors, err) + } + return cr +} + +// error reduces all of the errors into one error with a +// combined error message. If there are no errors, nil +// will be returned. +func (cr *checkResult) error() error { + if len(cr.errors) == 0 { + return nil + } + return fmt.Errorf("cert failed constraints check: %+q", cr.errors) +} + +// Check tests the provided certificate against the constraint. An error is returned if the certificate +// fails any of the constraints. nil is returned if the certificate passes all of the constraints. +func (cc CertificateConstraint) Check(cert *x509.Certificate, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error { + return newCheckResult(). + evaluate(cert, cc.checkCommonName). + evaluate(cert, cc.checkDNSNames). + evaluate(cert, cc.checkEmails). + evaluate(cert, cc.checkOrganizations). + evaluate(cert, cc.checkRoots(rootCAIDs, rootCertPool, intermediateCertPool)). + evaluate(cert, cc.checkURIs). + error() +} + +// checkCommonName verifies that the certificate's common name matches the constraint. +func (cc CertificateConstraint) checkCommonName(cert *x509.Certificate) error { + return checkCertConstraint("common name", []string{cc.CommonName}, []string{cert.Subject.CommonName}) +} + +// checkDNSNames verifies that the certificate's dns names matches the constraint. +func (cc CertificateConstraint) checkDNSNames(cert *x509.Certificate) error { + return checkCertConstraint("dns name", cc.DNSNames, cert.DNSNames) +} + +// checkEmails verifies that the certificate's emails matches the constraint. +func (cc CertificateConstraint) checkEmails(cert *x509.Certificate) error { + return checkCertConstraint("email", cc.Emails, cert.EmailAddresses) +} + +// checkOrganizations verifies that the certificate's organizations matches the constraint. 
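A usage sketch of the constraint API before the remaining per-attribute helpers below; the SPIFFE URI and the certificate pools are hypothetical, and only the types added above are assumed:

```go
package example

import (
	"crypto/x509"
	"fmt"

	"github.com/in-toto/in-toto-golang/in_toto"
)

// checkFunctionary is a hedged sketch: accept any common name and
// organization, require exactly one SAN URI, and forbid DNS/email SANs.
func checkFunctionary(cert *x509.Certificate, roots, intermediates *x509.CertPool) error {
	cc := in_toto.CertificateConstraint{
		CommonName:    "*",        // wildcard: any value
		DNSNames:      []string{}, // empty: cert must have none
		Emails:        []string{}, // empty: cert must have none
		Organizations: []string{"*"},
		Roots:         []string{"*"}, // any trusted root; the chain is still verified
		URIs:          []string{"spiffe://example.com/builder"}, // hypothetical
	}
	if err := cc.Check(cert, []string{"*"}, roots, intermediates); err != nil {
		return fmt.Errorf("functionary cert rejected: %w", err)
	}
	return nil
}
```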
+func (cc CertificateConstraint) checkOrganizations(cert *x509.Certificate) error { + return checkCertConstraint("organization", cc.Organizations, cert.Subject.Organization) +} + +// checkRoots verifies that the certificate's roots matches the constraint. +// The certificates trust chain must also be verified. +func (cc CertificateConstraint) checkRoots(rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) func(*x509.Certificate) error { + return func(cert *x509.Certificate) error { + _, err := VerifyCertificateTrust(cert, rootCertPool, intermediateCertPool) + if err != nil { + return fmt.Errorf("failed to verify roots: %w", err) + } + return checkCertConstraint("root", cc.Roots, rootCAIDs) + } +} + +// checkURIs verifies that the certificate's URIs matches the constraint. +func (cc CertificateConstraint) checkURIs(cert *x509.Certificate) error { + return checkCertConstraint("uri", cc.URIs, urisToStrings(cert.URIs)) +} + +// urisToStrings is a helper that converts a list of URL objects to the string that represents them +func urisToStrings(uris []*url.URL) []string { + res := make([]string, 0, len(uris)) + for _, uri := range uris { + res = append(res, uri.String()) + } + + return res +} + +// checkCertConstraint tests that the provided test values match the allowed values of the constraint. +// All allowed values must be met one-to-one to be considered a successful match. +func checkCertConstraint(attributeName string, constraints, values []string) error { + // If the only constraint is to allow all, the check succeeds + if len(constraints) == 1 && constraints[0] == AllowAllConstraint { + return nil + } + + if len(constraints) == 1 && constraints[0] == "" { + constraints = []string{} + } + + if len(values) == 1 && values[0] == "" { + values = []string{} + } + + // If no constraints are specified, but the certificate has values for the attribute, then the check fails + if len(constraints) == 0 && len(values) > 0 { + return fmt.Errorf("not expecting any %s(s), but cert has %d %s(s)", attributeName, len(values), attributeName) + } + + unmet := NewSet(constraints...) + for _, v := range values { + // if the cert has a value we didn't expect, fail early + if !unmet.Has(v) { + return fmt.Errorf("cert has an unexpected %s %s given constraints %+q", attributeName, v, constraints) + } + + // consider the constraint met + unmet.Remove(v) + } + + // if we have any unmet left after going through each test value, fail. + if len(unmet) > 0 { + return fmt.Errorf("cert with %s(s) %+q did not pass all constraints %+q", attributeName, values, constraints) + } + + return nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go new file mode 100644 index 0000000000..bdfc65d69f --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go @@ -0,0 +1,30 @@ +package in_toto + +import ( + "crypto/sha256" + "crypto/sha512" + "hash" +) + +/* +getHashMapping returns a mapping from hash algorithm to supported hash +interface. +*/ +func getHashMapping() map[string]func() hash.Hash { + return map[string]func() hash.Hash{ + "sha256": sha256.New, + "sha512": sha512.New, + "sha384": sha512.New384, + } +} + +/* +hashToHex calculates the hash over data based on hash algorithm h. +*/ +func hashToHex(h hash.Hash, data []byte) []byte { + h.Write(data) + // We need to use h.Sum(nil) here, because otherwise hash.Sum() appends + // the hash to the passed data. 
So instead of having only the hash + // we would get: "dataHASH" + return h.Sum(nil) +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go new file mode 100644 index 0000000000..7de482821a --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go @@ -0,0 +1,670 @@ +package in_toto + +import ( + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" +) + +// ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails +var ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type") + +// ErrNoPEMBlock gets triggered when there is no PEM block in the provided file +var ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)") + +// ErrUnsupportedKeyType is returned when we are dealing with a key type different to ed25519 or RSA +var ErrUnsupportedKeyType = errors.New("unsupported key type") + +// ErrInvalidSignature is returned when the signature is invalid +var ErrInvalidSignature = errors.New("invalid signature") + +// ErrInvalidKey is returned when a given key is none of RSA, ECDSA or ED25519 +var ErrInvalidKey = errors.New("invalid key") + +const ( + rsaKeyType string = "rsa" + ecdsaKeyType string = "ecdsa" + ed25519KeyType string = "ed25519" + rsassapsssha256Scheme string = "rsassa-pss-sha256" + ecdsaSha2nistp224 string = "ecdsa-sha2-nistp224" + ecdsaSha2nistp256 string = "ecdsa-sha2-nistp256" + ecdsaSha2nistp384 string = "ecdsa-sha2-nistp384" + ecdsaSha2nistp521 string = "ecdsa-sha2-nistp521" + ed25519Scheme string = "ed25519" + pemPublicKey string = "PUBLIC KEY" + pemPrivateKey string = "PRIVATE KEY" + pemRSAPrivateKey string = "RSA PRIVATE KEY" +) + +/* +getSupportedKeyIDHashAlgorithms returns a string slice of supported +KeyIDHashAlgorithms. We need to use this function instead of a constant, +because Go does not support global constant slices. +*/ +func getSupportedKeyIDHashAlgorithms() Set { + return NewSet("sha256", "sha512") +} + +/* +getSupportedRSASchemes returns a string slice of supported RSA Key schemes. +We need to use this function instead of a constant because Go does not support +global constant slices. +*/ +func getSupportedRSASchemes() []string { + return []string{rsassapsssha256Scheme} +} + +/* +getSupportedEcdsaSchemes returns a string slice of supported ecdsa Key schemes. +We need to use this function instead of a constant because Go does not support +global constant slices. +*/ +func getSupportedEcdsaSchemes() []string { + return []string{ecdsaSha2nistp224, ecdsaSha2nistp256, ecdsaSha2nistp384, ecdsaSha2nistp521} +} + +/* +getSupportedEd25519Schemes returns a string slice of supported ed25519 Key +schemes. We need to use this function instead of a constant because Go does +not support global constant slices. +*/ +func getSupportedEd25519Schemes() []string { + return []string{ed25519Scheme} +} + +/* +generateKeyID creates a partial key map and generates the key ID +based on the created partial key map via the SHA256 method. +The resulting keyID will be directly saved in the corresponding key object. +On success generateKeyID will return nil, in case of errors while encoding +there will be an error. 
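The keyid derivation described above can be reproduced standalone: canonical-JSON-encode the public portion of the key, then SHA-256 the result. A sketch that mirrors, but does not replace, the unexported generateKeyID below; the hex public key value is made up:

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/cjson"
)

func main() {
	// Only the public portion of the key participates in the keyid.
	keyToBeHashed := map[string]interface{}{
		"keytype":               "ed25519",
		"scheme":                "ed25519",
		"keyid_hash_algorithms": []string{"sha256", "sha512"},
		"keyval": map[string]string{
			"public": "e8912b58f47ae04a65d7437e3c82eb361f0d0d3509062e28fbc8e0d131f17e1a",
		},
	}
	keyCanonical, err := cjson.EncodeCanonical(keyToBeHashed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("keyid: %x\n", sha256.Sum256(keyCanonical))
}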
+*/ +func (k *Key) generateKeyID() error { + // Create partial key map used to create the keyid + // Unfortunately, we can't use the Key object because this also carries + // yet unwanted fields, such as KeyID and KeyVal.Private and therefore + // produces a different hash. We generate the keyID exactly as we do in + // the securesystemslib to keep interoperability between other in-toto + // implementations. + var keyToBeHashed = map[string]interface{}{ + "keytype": k.KeyType, + "scheme": k.Scheme, + "keyid_hash_algorithms": k.KeyIDHashAlgorithms, + "keyval": map[string]string{ + "public": k.KeyVal.Public, + }, + } + keyCanonical, err := cjson.EncodeCanonical(keyToBeHashed) + if err != nil { + return err + } + // calculate sha256 and return string representation of keyID + keyHashed := sha256.Sum256(keyCanonical) + k.KeyID = fmt.Sprintf("%x", keyHashed) + err = validateKey(*k) + if err != nil { + return err + } + return nil +} + +/* +generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType. +If successful it returns a PEM block as []byte slice. This function should always +succeed, if keyBytes is empty the PEM block will have an empty byte block. +Therefore only header and footer will exist. +*/ +func generatePEMBlock(keyBytes []byte, pemType string) []byte { + // construct PEM block + pemBlock := &pem.Block{ + Type: pemType, + Headers: nil, + Bytes: keyBytes, + } + return pem.EncodeToMemory(pemBlock) +} + +/* +setKeyComponents sets all components in our key object. +Furthermore it makes sure to remove any trailing and leading whitespaces or newlines. +We treat key types differently for interoperability reasons to the in-toto python +implementation and the securesystemslib. +*/ +func (k *Key) setKeyComponents(pubKeyBytes []byte, privateKeyBytes []byte, keyType string, scheme string, KeyIDHashAlgorithms []string) error { + // assume we have a privateKey if the key size is bigger than 0 + + switch keyType { + case rsaKeyType: + if len(privateKeyBytes) > 0 { + k.KeyVal = KeyVal{ + Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemRSAPrivateKey))), + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } else { + k.KeyVal = KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } + case ecdsaKeyType: + if len(privateKeyBytes) > 0 { + k.KeyVal = KeyVal{ + Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemPrivateKey))), + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } else { + k.KeyVal = KeyVal{ + Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))), + } + } + case ed25519KeyType: + if len(privateKeyBytes) > 0 { + k.KeyVal = KeyVal{ + Private: strings.TrimSpace(hex.EncodeToString(privateKeyBytes)), + Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)), + } + } else { + k.KeyVal = KeyVal{ + Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)), + } + } + default: + return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, keyType) + } + k.KeyType = keyType + k.Scheme = scheme + k.KeyIDHashAlgorithms = KeyIDHashAlgorithms + if err := k.generateKeyID(); err != nil { + return err + } + return nil +} + +/* +parseKey tries to parse a PEM []byte slice. Using the following standards +in the given order: + + - PKCS8 + - PKCS1 + - PKIX + +On success it returns the parsed key and nil. 
+On failure it returns nil and the error ErrFailedPEMParsing +*/ +func parseKey(data []byte) (interface{}, error) { + key, err := x509.ParsePKCS8PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKCS1PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKIXPublicKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParseCertificate(data) + if err == nil { + return key, nil + } + key, err = x509.ParseECPrivateKey(data) + if err == nil { + return key, nil + } + return nil, ErrFailedPEMParsing +} + +/* +decodeAndParse receives potential PEM bytes decodes them via pem.Decode +and pushes them to parseKey. If any error occurs during this process, +the function will return nil and an error (either ErrFailedPEMParsing +or ErrNoPEMBlock). On success it will return the decoded pemData, the +key object interface and nil as error. We need the decoded pemData, +because LoadKey relies on decoded pemData for operating system +interoperability. +*/ +func decodeAndParse(pemBytes []byte) (*pem.Block, interface{}, error) { + // pem.Decode returns the parsed pem block and a rest. + // The rest is everything, that could not be parsed as PEM block. + // Therefore we can drop this via using the blank identifier "_" + data, _ := pem.Decode(pemBytes) + if data == nil { + return nil, nil, ErrNoPEMBlock + } + + // Try to load private key, if this fails try to load + // key as public key + key, err := parseKey(data.Bytes) + if err != nil { + return nil, nil, err + } + return data, key, nil +} + +/* +LoadKey loads the key file at specified file path into the key object. +It automatically derives the PEM type and the key type. +Right now the following PEM types are supported: + + - PKCS1 for private keys + - PKCS8 for private keys + - PKIX for public keys + +The following key types are supported and will be automatically assigned to +the key type field: + + - ed25519 + - rsa + - ecdsa + +The following schemes are supported: + + - ed25519 -> ed25519 + - rsa -> rsassa-pss-sha256 + - ecdsa -> ecdsa-sha256-nistp256 + +Note that, this behavior is consistent with the securesystemslib, except for +ecdsa. We do not use the scheme string as key type in in-toto-golang. +Instead we are going with a ecdsa/ecdsa-sha2-nistp256 pair. + +On success it will return nil. The following errors can happen: + + - path not found or not readable + - no PEM block in the loaded file + - no valid PKCS8/PKCS1 private key or PKIX public key + - errors while marshalling + - unsupported key types +*/ +func (k *Key) LoadKey(path string, scheme string, KeyIDHashAlgorithms []string) error { + pemFile, err := os.Open(path) + if err != nil { + return err + } + defer pemFile.Close() + + err = k.LoadKeyReader(pemFile, scheme, KeyIDHashAlgorithms) + if err != nil { + return err + } + + return pemFile.Close() +} + +func (k *Key) LoadKeyDefaults(path string) error { + pemFile, err := os.Open(path) + if err != nil { + return err + } + defer pemFile.Close() + + err = k.LoadKeyReaderDefaults(pemFile) + if err != nil { + return err + } + + return pemFile.Close() +} + +// LoadKeyReader loads the key from a supplied reader. The logic matches LoadKey otherwise. 
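A usage sketch for the two loaders above (the file name is hypothetical): pass the scheme and keyid hash algorithms explicitly, or let LoadKeyDefaults derive them from the parsed key type:

package main

import (
	"fmt"
	"log"

	"github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	var explicit in_toto.Key
	if err := explicit.LoadKey("alice_private.pem", "rsassa-pss-sha256", []string{"sha256", "sha512"}); err != nil {
		log.Fatal(err)
	}

	var defaulted in_toto.Key
	if err := defaulted.LoadKeyDefaults("alice_private.pem"); err != nil {
		log.Fatal(err)
	}
	// Same key material and same derived scheme, hence the same keyid.
	fmt.Println(explicit.KeyID == defaulted.KeyID)
}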
+func (k *Key) LoadKeyReader(r io.Reader, scheme string, KeyIDHashAlgorithms []string) error { + if r == nil { + return ErrNoPEMBlock + } + // Read key bytes + pemBytes, err := ioutil.ReadAll(r) + if err != nil { + return err + } + // decodeAndParse returns the pemData for later use + // and a parsed key object (for operations on that key, like extracting the public Key) + pemData, key, err := decodeAndParse(pemBytes) + if err != nil { + return err + } + + return k.loadKey(key, pemData, scheme, KeyIDHashAlgorithms) +} + +func (k *Key) LoadKeyReaderDefaults(r io.Reader) error { + if r == nil { + return ErrNoPEMBlock + } + // Read key bytes + pemBytes, err := ioutil.ReadAll(r) + if err != nil { + return err + } + // decodeAndParse returns the pemData for later use + // and a parsed key object (for operations on that key, like extracting the public Key) + pemData, key, err := decodeAndParse(pemBytes) + if err != nil { + return err + } + + scheme, keyIDHashAlgorithms, err := getDefaultKeyScheme(key) + if err != nil { + return err + } + + return k.loadKey(key, pemData, scheme, keyIDHashAlgorithms) +} + +func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms []string, err error) { + keyIDHashAlgorithms = []string{"sha256", "sha512"} + + switch key.(type) { + case *rsa.PublicKey, *rsa.PrivateKey: + scheme = rsassapsssha256Scheme + case ed25519.PrivateKey, ed25519.PublicKey: + scheme = ed25519Scheme + case *ecdsa.PrivateKey, *ecdsa.PublicKey: + scheme = ecdsaSha2nistp256 + case *x509.Certificate: + return getDefaultKeyScheme(key.(*x509.Certificate).PublicKey) + default: + err = ErrUnsupportedKeyType + } + + return scheme, keyIDHashAlgorithms, err +} + +func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error { + + switch key.(type) { + case *rsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PublicKey)) + if err != nil { + return err + } + if err := k.setKeyComponents(pubKeyBytes, []byte{}, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case *rsa.PrivateKey: + // Note: RSA Public Keys will get stored as X.509 SubjectPublicKeyInfo (RFC5280) + // This behavior is consistent to the securesystemslib + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PrivateKey).Public()) + if err != nil { + return err + } + if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case ed25519.PublicKey: + if err := k.setKeyComponents(key.(ed25519.PublicKey), []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case ed25519.PrivateKey: + pubKeyBytes := key.(ed25519.PrivateKey).Public() + if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key.(ed25519.PrivateKey), ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case *ecdsa.PrivateKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PrivateKey).Public()) + if err != nil { + return err + } + if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case *ecdsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PublicKey)) + if err != nil { + return err + } + if err := k.setKeyComponents(pubKeyBytes, []byte{}, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil { + return err + } + case *x509.Certificate: + err := k.loadKey(key.(*x509.Certificate).PublicKey, pemData, scheme, 
keyIDHashAlgorithms) + if err != nil { + return err + } + + k.KeyVal.Certificate = string(pem.EncodeToMemory(pemData)) + + default: + // We should never get here, because we handle all key types supported by Go + return errors.New("unexpected Error in LoadKey function") + } + + return nil +} + +/* +GenerateSignature will automatically detect the key type and sign the signable data +with the provided key. If everything goes right GenerateSignature will return +a signature valid for the key and err=nil. If something goes wrong it will +return an uninitialized signature and an error. Possible errors are: + + - ErrNoPEMBlock + - ErrUnsupportedKeyType + +Currently, only one scheme per key is supported. + +Note that in-toto-golang has different requirements for an ecdsa key. +In in-toto-golang we use the string 'ecdsa' for the key type. +In the key scheme we use: ecdsa-sha2-nistp256. +*/ +func GenerateSignature(signable []byte, key Key) (Signature, error) { + err := validateKey(key) + if err != nil { + return Signature{}, err + } + var signature Signature + var signatureBuffer []byte + hashMapping := getHashMapping() + // The following switch block is needed for keeping interoperability + // with the securesystemslib and the python implementation + // in which we are storing RSA keys in PEM format, but ed25519 keys hex encoded. + switch key.KeyType { + case rsaKeyType: + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private)) + if err != nil { + return Signature{}, err + } + parsedKey, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return Signature{}, ErrKeyKeyTypeMismatch + } + switch key.Scheme { + case rsassapsssha256Scheme: + hashed := hashToHex(hashMapping["sha256"](), signable) + // We use rand.Reader as secure random source for rsa.SignPSS() + signatureBuffer, err = rsa.SignPSS(rand.Reader, parsedKey.(*rsa.PrivateKey), crypto.SHA256, hashed, + &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}) + if err != nil { + return signature, err + } + default: + // supported key schemes will get checked in validateKey + panic("unexpected Error in GenerateSignature function") + } + case ecdsaKeyType: + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private)) + if err != nil { + return Signature{}, err + } + parsedKey, ok := parsedKey.(*ecdsa.PrivateKey) + if !ok { + return Signature{}, ErrKeyKeyTypeMismatch + } + curveSize := parsedKey.(*ecdsa.PrivateKey).Curve.Params().BitSize + var hashed []byte + if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil { + return Signature{}, ErrCurveSizeSchemeMismatch + } + // implement https://tools.ietf.org/html/rfc5656#section-6.2.1 + // We determine the curve size and choose the correct hashing + // method based on the curveSize + switch { + case curveSize <= 256: + hashed = hashToHex(hashMapping["sha256"](), signable) + case 256 < curveSize && curveSize <= 384: + hashed = hashToHex(hashMapping["sha384"](), signable) + case curveSize > 384: + hashed = hashToHex(hashMapping["sha512"](), signable) + default: + panic("unexpected Error in GenerateSignature function") + } + // Generate the ecdsa signature in the same way as we do in the securesystemslib + // We are marshalling the ecdsaSignature struct as ASN.1 INTEGER SEQUENCES + // into an ASN.1 Object.
+ signatureBuffer, err = ecdsa.SignASN1(rand.Reader, parsedKey.(*ecdsa.PrivateKey), hashed[:]) + if err != nil { + return signature, err + } + case ed25519KeyType: + // We do not need a scheme switch here, because ed25519 + // only consist of sha256 and curve25519. + privateHex, err := hex.DecodeString(key.KeyVal.Private) + if err != nil { + return signature, ErrInvalidHexString + } + // Note: We can directly use the key for signing and do not + // need to use ed25519.NewKeyFromSeed(). + signatureBuffer = ed25519.Sign(privateHex, signable) + default: + // We should never get here, because we call validateKey in the first + // line of the function. + panic("unexpected Error in GenerateSignature function") + } + signature.Sig = hex.EncodeToString(signatureBuffer) + signature.KeyID = key.KeyID + signature.Certificate = key.KeyVal.Certificate + return signature, nil +} + +/* +VerifySignature will verify unverified byte data via a passed key and signature. +Supported key types are: + + - rsa + - ed25519 + - ecdsa + +When encountering an RSA key, VerifySignature will decode the PEM block in the key +and will call rsa.VerifyPSS() for verifying the RSA signature. +When encountering an ed25519 key, VerifySignature will decode the hex string encoded +public key and will use ed25519.Verify() for verifying the ed25519 signature. +When the given key is an ecdsa key, VerifySignature will unmarshall the ASN1 object +and will use the retrieved ecdsa components 'r' and 's' for verifying the signature. +On success it will return nil. In case of an unsupported key type or any other error +it will return an error. + +Note that in-toto-golang has different requirements to an ecdsa key. +In in-toto-golang we use the string 'ecdsa' as string for the key type. +In the key scheme we use: ecdsa-sha2-nistp256. 
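A sign-then-verify round trip with the two package-level functions, GenerateSignature above and VerifySignature below; the PEM path is hypothetical, and the same Key carries both halves after loading a private key:

package main

import (
	"fmt"
	"log"

	"github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	var key in_toto.Key
	if err := key.LoadKeyDefaults("alice_private.pem"); err != nil { // hypothetical path
		log.Fatal(err)
	}
	payload := []byte(`{"example":"payload"}`)

	sig, err := in_toto.GenerateSignature(payload, key)
	if err != nil {
		log.Fatal(err)
	}
	if err := in_toto.VerifySignature(key, sig, payload); err != nil {
		log.Fatal(err)
	}
	fmt.Println("verified signature from key", sig.KeyID)
}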
+*/ +func VerifySignature(key Key, sig Signature, unverified []byte) error { + err := validateKey(key) + if err != nil { + return err + } + sigBytes, err := hex.DecodeString(sig.Sig) + if err != nil { + return err + } + hashMapping := getHashMapping() + switch key.KeyType { + case rsaKeyType: + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public)) + if err != nil { + return err + } + parsedKey, ok := parsedKey.(*rsa.PublicKey) + if !ok { + return ErrKeyKeyTypeMismatch + } + switch key.Scheme { + case rsassapsssha256Scheme: + hashed := hashToHex(hashMapping["sha256"](), unverified) + err = rsa.VerifyPSS(parsedKey.(*rsa.PublicKey), crypto.SHA256, hashed, sigBytes, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}) + if err != nil { + return fmt.Errorf("%w: %s", ErrInvalidSignature, err) + } + default: + // supported key schemes will get checked in validateKey + panic("unexpected Error in VerifySignature function") + } + case ecdsaKeyType: + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public)) + if err != nil { + return err + } + parsedKey, ok := parsedKey.(*ecdsa.PublicKey) + if !ok { + return ErrKeyKeyTypeMismatch + } + curveSize := parsedKey.(*ecdsa.PublicKey).Curve.Params().BitSize + var hashed []byte + if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil { + return ErrCurveSizeSchemeMismatch + } + // implement https://tools.ietf.org/html/rfc5656#section-6.2.1 + // We determine the curve size and choose the correct hashing + // method based on the curveSize + switch { + case curveSize <= 256: + hashed = hashToHex(hashMapping["sha256"](), unverified) + case 256 < curveSize && curveSize <= 384: + hashed = hashToHex(hashMapping["sha384"](), unverified) + case curveSize > 384: + hashed = hashToHex(hashMapping["sha512"](), unverified) + default: + panic("unexpected Error in VerifySignature function") + } + if ok := ecdsa.VerifyASN1(parsedKey.(*ecdsa.PublicKey), hashed[:], sigBytes); !ok { + return ErrInvalidSignature + } + case ed25519KeyType: + // We do not need a scheme switch here, because ed25519 + // only consist of sha256 and curve25519. + pubHex, err := hex.DecodeString(key.KeyVal.Public) + if err != nil { + return ErrInvalidHexString + } + if ok := ed25519.Verify(pubHex, unverified, sigBytes); !ok { + return fmt.Errorf("%w: ed25519", ErrInvalidSignature) + } + default: + // We should never get here, because we call validateKey in the first + // line of the function. 
+ panic("unexpected Error in VerifySignature function") + } + return nil +} + +/* +VerifyCertificateTrust verifies that the certificate has a chain of trust +to a root in rootCertPool, possibly using any intermediates in +intermediateCertPool +*/ +func VerifyCertificateTrust(cert *x509.Certificate, rootCertPool, intermediateCertPool *x509.CertPool) ([][]*x509.Certificate, error) { + verifyOptions := x509.VerifyOptions{ + Roots: rootCertPool, + Intermediates: intermediateCertPool, + } + chains, err := cert.Verify(verifyOptions) + if len(chains) == 0 || err != nil { + return nil, fmt.Errorf("cert cannot be verified by provided roots and intermediates") + } + return chains, nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go new file mode 100644 index 0000000000..52373aa75f --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/match.go @@ -0,0 +1,227 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found at https://golang.org/LICENSE. + +// this is a modified version of path.Match that removes handling of path separators + +package in_toto + +import ( + "errors" + "unicode/utf8" +) + +// errBadPattern indicates a pattern was malformed. +var errBadPattern = errors.New("syntax error in pattern") + +// match reports whether name matches the shell pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-/ characters +// '?' matches any single non-/ character +// '[' [ '^' ] { character-range } ']' +// character class (must be non-empty) +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +func match(pattern, name string) (matched bool, err error) { +Pattern: + for len(pattern) > 0 { + var star bool + var chunk string + star, chunk, pattern = scanChunk(pattern) + if star && chunk == "" { + // Trailing * matches everything + return true, nil + } + // Look for match at current position. + t, ok, err := matchChunk(chunk, name) + // if we're the last chunk, make sure we've exhausted the name + // otherwise we'll give a false result even if we could still match + // using the star + if ok && (len(t) == 0 || len(pattern) > 0) { + name = t + continue + } + if err != nil { + return false, err + } + if star { + // Look for match skipping i+1 bytes. + for i := 0; i < len(name); i++ { + t, ok, err := matchChunk(chunk, name[i+1:]) + if ok { + // if we're the last chunk, make sure we exhausted the name + if len(pattern) == 0 && len(t) > 0 { + continue + } + name = t + continue Pattern + } + if err != nil { + return false, err + } + } + } + // Before returning false with no error, + // check that the remainder of the pattern is syntactically valid. + for len(pattern) > 0 { + _, chunk, pattern = scanChunk(pattern) + if _, _, err := matchChunk(chunk, ""); err != nil { + return false, err + } + } + return false, nil + } + return len(name) == 0, nil +} + +// scanChunk gets the next segment of pattern, which is a non-star string +// possibly preceded by a star. 
+func scanChunk(pattern string) (star bool, chunk, rest string) { + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + star = true + } + inrange := false + var i int +Scan: + for i = 0; i < len(pattern); i++ { + switch pattern[i] { + case '\\': + // error check handled in matchChunk: bad pattern. + if i+1 < len(pattern) { + i++ + } + case '[': + inrange = true + case ']': + inrange = false + case '*': + if !inrange { + break Scan + } + } + } + return star, pattern[0:i], pattern[i:] +} + +// matchChunk checks whether chunk matches the beginning of s. +// If so, it returns the remainder of s (after the match). +// Chunk is all single-character operators: literals, char classes, and ?. +func matchChunk(chunk, s string) (rest string, ok bool, err error) { + // failed records whether the match has failed. + // After the match fails, the loop continues on processing chunk, + // checking that the pattern is well-formed but no longer reading s. + failed := false + for len(chunk) > 0 { + if !failed && len(s) == 0 { + failed = true + } + switch chunk[0] { + case '[': + // character class + var r rune + if !failed { + var n int + r, n = utf8.DecodeRuneInString(s) + s = s[n:] + } + chunk = chunk[1:] + // possibly negated + negated := false + if len(chunk) > 0 && chunk[0] == '^' { + negated = true + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return "", false, err + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return "", false, err + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match == negated { + failed = true + } + + case '?': + if !failed { + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + } + chunk = chunk[1:] + + case '\\': + chunk = chunk[1:] + if len(chunk) == 0 { + return "", false, errBadPattern + } + fallthrough + + default: + if !failed { + if chunk[0] != s[0] { + failed = true + } + s = s[1:] + } + chunk = chunk[1:] + } + } + if failed { + return "", false, nil + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. 
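By contrast with the modified matcher above, the stock library refuses to let '*' cross a path separator; since the vendored match is unexported, the difference is easiest to show from the standard library's side:

package main

import (
	"fmt"
	"path"
)

func main() {
	// Standard library semantics: '*' never crosses a path separator.
	ok, _ := path.Match("foo*", "foo/bar")
	fmt.Println(ok) // false
	// The vendored match above drops that special-casing, so the same pattern
	// does match "foo/bar" there (it is unexported, hence the contrast via path.Match).
}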
+func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = errBadPattern + return + } + if chunk[0] == '\\' { + chunk = chunk[1:] + if len(chunk) == 0 { + err = errBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = errBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = errBadPattern + } + return +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go new file mode 100644 index 0000000000..e22b79da32 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go @@ -0,0 +1,1073 @@ +package in_toto + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1" + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/secure-systems-lab/go-securesystemslib/dsse" +) + +/* +KeyVal contains the actual values of a key, as opposed to key metadata such as +a key identifier or key type. For RSA keys, the key value is a pair of public +and private keys in PEM format stored as strings. For public keys the Private +field may be an empty string. +*/ +type KeyVal struct { + Private string `json:"private"` + Public string `json:"public"` + Certificate string `json:"certificate,omitempty"` +} + +/* +Key represents a generic in-toto key that contains key metadata, such as an +identifier, supported hash algorithms to create the identifier, the key type +and the supported signature scheme, and the actual key value. +*/ +type Key struct { + KeyID string `json:"keyid"` + KeyIDHashAlgorithms []string `json:"keyid_hash_algorithms"` + KeyType string `json:"keytype"` + KeyVal KeyVal `json:"keyval"` + Scheme string `json:"scheme"` +} + +// PayloadType is the payload type used for links and layouts. +const PayloadType = "application/vnd.in-toto+json" + +// ErrEmptyKeyField will be thrown if a field in our Key struct is empty. +var ErrEmptyKeyField = errors.New("empty field in key") + +// ErrInvalidHexString will be thrown, if a string doesn't match a hex string. +var ErrInvalidHexString = errors.New("invalid hex string") + +// ErrSchemeKeyTypeMismatch will be thrown, if the given scheme and key type are not supported together. +var ErrSchemeKeyTypeMismatch = errors.New("the scheme and key type are not supported together") + +// ErrUnsupportedKeyIDHashAlgorithms will be thrown, if the specified KeyIDHashAlgorithms is not supported. +var ErrUnsupportedKeyIDHashAlgorithms = errors.New("the given keyID hash algorithm is not supported") + +// ErrKeyKeyTypeMismatch will be thrown, if the specified keyType does not match the key +var ErrKeyKeyTypeMismatch = errors.New("the given key does not match its key type") + +// ErrNoPublicKey gets returned when the private key value is not empty. 
+var ErrNoPublicKey = errors.New("the given key is not a public key") + +// ErrCurveSizeSchemeMismatch gets returned, when the scheme and curve size are incompatible +// for example: curve size = "521" and scheme = "ecdsa-sha2-nistp224" +var ErrCurveSizeSchemeMismatch = errors.New("the scheme does not match the curve size") + +const ( + // StatementInTotoV01 is the statement type for the generalized link format + // containing statements. This is constant for all predicate types. + StatementInTotoV01 = "https://in-toto.io/Statement/v0.1" + // PredicateSPDX represents a SBOM using the SPDX standard. + // The SPDX mandates 'spdxVersion' field, so predicate type can omit + // version. + PredicateSPDX = "https://spdx.dev/Document" + // PredicateCycloneDX represents a CycloneDX SBOM + PredicateCycloneDX = "https://cyclonedx.org/bom" + // PredicateLinkV1 represents an in-toto 0.9 link. + PredicateLinkV1 = "https://in-toto.io/Link/v1" +) + +// ErrInvalidPayloadType indicates that the envelope used an unkown payload type +var ErrInvalidPayloadType = errors.New("unknown payload type") + +/* +matchEcdsaScheme checks if the scheme suffix, matches the ecdsa key +curve size. We do not need a full regex match here, because +our validateKey functions are already checking for a valid scheme string. +*/ +func matchEcdsaScheme(curveSize int, scheme string) error { + if !strings.HasSuffix(scheme, strconv.Itoa(curveSize)) { + return ErrCurveSizeSchemeMismatch + } + return nil +} + +/* +validateHexString is used to validate that a string passed to it contains +only valid hexadecimal characters. +*/ +func validateHexString(str string) error { + formatCheck, _ := regexp.MatchString("^[a-fA-F0-9]+$", str) + if !formatCheck { + return fmt.Errorf("%w: %s", ErrInvalidHexString, str) + } + return nil +} + +/* +validateKeyVal validates the KeyVal struct. In case of an ed25519 key, +it will check for a hex string for private and public key. In any other +case, validateKeyVal will try to decode the PEM block. If this succeeds, +we have a valid PEM block in our KeyVal struct. On success it will return nil +on failure it will return the corresponding error. This can be either +an ErrInvalidHexString, an ErrNoPEMBlock or an ErrUnsupportedKeyType +if the KeyType is unknown. +*/ +func validateKeyVal(key Key) error { + switch key.KeyType { + case ed25519KeyType: + // We cannot use matchPublicKeyKeyType or matchPrivateKeyKeyType here, + // because we retrieve the key not from PEM. Hence we are dealing with + // plain ed25519 key bytes. These bytes can't be typechecked like in the + // matchKeyKeytype functions. 
+ err := validateHexString(key.KeyVal.Public) + if err != nil { + return err + } + if key.KeyVal.Private != "" { + err := validateHexString(key.KeyVal.Private) + if err != nil { + return err + } + } + case rsaKeyType, ecdsaKeyType: + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public)) + if err != nil { + return err + } + err = matchPublicKeyKeyType(parsedKey, key.KeyType) + if err != nil { + return err + } + if key.KeyVal.Private != "" { + // We do not need the pemData here, so we can throw it away via '_' + _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private)) + if err != nil { + return err + } + err = matchPrivateKeyKeyType(parsedKey, key.KeyType) + if err != nil { + return err + } + } + default: + return ErrUnsupportedKeyType + } + return nil +} + +/* +matchPublicKeyKeyType validates an interface if it can be asserted to a +the RSA or ECDSA public key type. We can only check RSA and ECDSA this way, +because we are storing them in PEM format. Ed25519 keys are stored as plain +ed25519 keys encoded as hex strings, thus we have no metadata for them. +This function will return nil on success. If the key type does not match +it will return an ErrKeyKeyTypeMismatch. +*/ +func matchPublicKeyKeyType(key interface{}, keyType string) error { + switch key.(type) { + case *rsa.PublicKey: + if keyType != rsaKeyType { + return ErrKeyKeyTypeMismatch + } + case *ecdsa.PublicKey: + if keyType != ecdsaKeyType { + return ErrKeyKeyTypeMismatch + } + default: + return ErrInvalidKey + } + return nil +} + +/* +matchPrivateKeyKeyType validates an interface if it can be asserted to a +the RSA or ECDSA private key type. We can only check RSA and ECDSA this way, +because we are storing them in PEM format. Ed25519 keys are stored as plain +ed25519 keys encoded as hex strings, thus we have no metadata for them. +This function will return nil on success. If the key type does not match +it will return an ErrKeyKeyTypeMismatch. +*/ +func matchPrivateKeyKeyType(key interface{}, keyType string) error { + // we can only check RSA and ECDSA this way, because we are storing them in PEM + // format. ed25519 keys are stored as plain ed25519 keys encoded as hex strings + // so we have no metadata for them. + switch key.(type) { + case *rsa.PrivateKey: + if keyType != rsaKeyType { + return ErrKeyKeyTypeMismatch + } + case *ecdsa.PrivateKey: + if keyType != ecdsaKeyType { + return ErrKeyKeyTypeMismatch + } + default: + return ErrInvalidKey + } + return nil +} + +/* +matchKeyTypeScheme checks if the specified scheme matches our specified +keyType. If the keyType is not supported it will return an +ErrUnsupportedKeyType. If the keyType and scheme do not match it will return +an ErrSchemeKeyTypeMismatch. If the specified keyType and scheme are +compatible matchKeyTypeScheme will return nil. +*/ +func matchKeyTypeScheme(key Key) error { + switch key.KeyType { + case rsaKeyType: + for _, scheme := range getSupportedRSASchemes() { + if key.Scheme == scheme { + return nil + } + } + case ed25519KeyType: + for _, scheme := range getSupportedEd25519Schemes() { + if key.Scheme == scheme { + return nil + } + } + case ecdsaKeyType: + for _, scheme := range getSupportedEcdsaSchemes() { + if key.Scheme == scheme { + return nil + } + } + default: + return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, key.KeyType) + } + return ErrSchemeKeyTypeMismatch +} + +/* +validateKey checks the outer key object (everything, except the KeyVal struct). 
+It verifies the keyID for being a hex string and checks for empty fields. +On success it will return nil, on error it will return the corresponding error. +Either: ErrEmptyKeyField or ErrInvalidHexString. +*/ +func validateKey(key Key) error { + err := validateHexString(key.KeyID) + if err != nil { + return err + } + // This probably can be done more elegant with reflection + // but we care about performance, do we?! + if key.KeyType == "" { + return fmt.Errorf("%w: keytype", ErrEmptyKeyField) + } + if key.KeyVal.Public == "" && key.KeyVal.Certificate == "" { + return fmt.Errorf("%w: keyval.public and keyval.certificate cannot both be blank", ErrEmptyKeyField) + } + if key.Scheme == "" { + return fmt.Errorf("%w: scheme", ErrEmptyKeyField) + } + err = matchKeyTypeScheme(key) + if err != nil { + return err + } + // only check for supported KeyIDHashAlgorithms, if the variable has been set + if key.KeyIDHashAlgorithms != nil { + supportedKeyIDHashAlgorithms := getSupportedKeyIDHashAlgorithms() + if !supportedKeyIDHashAlgorithms.IsSubSet(NewSet(key.KeyIDHashAlgorithms...)) { + return fmt.Errorf("%w: %#v, supported are: %#v", ErrUnsupportedKeyIDHashAlgorithms, key.KeyIDHashAlgorithms, getSupportedKeyIDHashAlgorithms()) + } + } + return nil +} + +/* +validatePublicKey is a wrapper around validateKey. It test if the private key +value in the key is empty and then validates the key via calling validateKey. +On success it will return nil, on error it will return an ErrNoPublicKey error. +*/ +func validatePublicKey(key Key) error { + if key.KeyVal.Private != "" { + return ErrNoPublicKey + } + err := validateKey(key) + if err != nil { + return err + } + return nil +} + +/* +Signature represents a generic in-toto signature that contains the identifier +of the Key, which was used to create the signature and the signature data. The +used signature scheme is found in the corresponding Key. +*/ +type Signature struct { + KeyID string `json:"keyid"` + Sig string `json:"sig"` + Certificate string `json:"cert,omitempty"` +} + +// GetCertificate returns the parsed x509 certificate attached to the signature, +// if it exists. +func (sig Signature) GetCertificate() (Key, error) { + key := Key{} + if len(sig.Certificate) == 0 { + return key, errors.New("Signature has empty Certificate") + } + + err := key.LoadKeyReaderDefaults(strings.NewReader(sig.Certificate)) + return key, err +} + +/* +validateSignature is a function used to check if a passed signature is valid, +by inspecting the key ID and the signature itself. +*/ +func validateSignature(signature Signature) error { + if err := validateHexString(signature.KeyID); err != nil { + return err + } + if err := validateHexString(signature.Sig); err != nil { + return err + } + return nil +} + +/* +validateSliceOfSignatures is a helper function used to validate multiple +signatures stored in a slice. +*/ +func validateSliceOfSignatures(slice []Signature) error { + for _, signature := range slice { + if err := validateSignature(signature); err != nil { + return err + } + } + return nil +} + +/* +Link represents the evidence of a supply chain step performed by a functionary. +It should be contained in a generic Metablock object, which provides +functionality for signing and signature verification, and reading from and +writing to disk. 
+*/ +type Link struct { + Type string `json:"_type"` + Name string `json:"name"` + Materials map[string]interface{} `json:"materials"` + Products map[string]interface{} `json:"products"` + ByProducts map[string]interface{} `json:"byproducts"` + Command []string `json:"command"` + Environment map[string]interface{} `json:"environment"` +} + +/* +validateArtifacts is a general function used to validate products and materials. +*/ +func validateArtifacts(artifacts map[string]interface{}) error { + for artifactName, artifact := range artifacts { + artifactValue := reflect.ValueOf(artifact).MapRange() + for artifactValue.Next() { + value := artifactValue.Value().Interface().(string) + hashType := artifactValue.Key().Interface().(string) + if err := validateHexString(value); err != nil { + return fmt.Errorf("in artifact '%s', %s hash value: %s", + artifactName, hashType, err.Error()) + } + } + } + return nil +} + +/* +validateLink is a function used to ensure that a passed item of type Link +matches the necessary format. +*/ +func validateLink(link Link) error { + if link.Type != "link" { + return fmt.Errorf("invalid type for link '%s': should be 'link'", + link.Name) + } + + if err := validateArtifacts(link.Materials); err != nil { + return fmt.Errorf("in materials of link '%s': %s", link.Name, + err.Error()) + } + + if err := validateArtifacts(link.Products); err != nil { + return fmt.Errorf("in products of link '%s': %s", link.Name, + err.Error()) + } + + return nil +} + +/* +LinkNameFormat represents a format string used to create the filename for a +signed Link (wrapped in a Metablock). It consists of the name of the link and +the first 8 characters of the signing key id. E.g.: + + fmt.Sprintf(LinkNameFormat, "package", + "2f89b9272acfc8f4a0a0f094d789fdb0ba798b0fe41f2f5f417c12f0085ff498") + // returns "package.2f89b9272.link" +*/ +const LinkNameFormat = "%s.%.8s.link" +const PreliminaryLinkNameFormat = ".%s.%.8s.link-unfinished" + +/* +LinkNameFormatShort is for links that are not signed, e.g.: + + fmt.Sprintf(LinkNameFormatShort, "unsigned") + // returns "unsigned.link" +*/ +const LinkNameFormatShort = "%s.link" +const LinkGlobFormat = "%s.????????.link" + +/* +SublayoutLinkDirFormat represents the format of the name of the directory for +sublayout links during the verification workflow. +*/ +const SublayoutLinkDirFormat = "%s.%.8s" + +/* +SupplyChainItem summarizes common fields of the two available supply chain +item types, Inspection and Step. +*/ +type SupplyChainItem struct { + Name string `json:"name"` + ExpectedMaterials [][]string `json:"expected_materials"` + ExpectedProducts [][]string `json:"expected_products"` +} + +/* +validateArtifactRule calls UnpackRule to validate that the passed rule conforms +with any of the available rule formats. +*/ +func validateArtifactRule(rule []string) error { + if _, err := UnpackRule(rule); err != nil { + return err + } + return nil +} + +/* +validateSliceOfArtifactRules iterates over passed rules to validate them. +*/ +func validateSliceOfArtifactRules(rules [][]string) error { + for _, rule := range rules { + if err := validateArtifactRule(rule); err != nil { + return err + } + } + return nil +} + +/* +validateSupplyChainItem is used to validate the common elements found in both +steps and inspections. Here, the function primarily ensures that the name of +a supply chain item isn't empty. 
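A hand-assembled Link matching the struct above; the digests are synthetic hex, padded to 64 characters so they would also pass validateArtifacts:

package main

import (
	"fmt"
	"strings"

	"github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	link := in_toto.Link{
		Type: "link",
		Name: "package",
		Materials: map[string]interface{}{
			"foo.py": map[string]interface{}{"sha256": strings.Repeat("ab", 32)},
		},
		Products: map[string]interface{}{
			"foo.tar.gz": map[string]interface{}{"sha256": strings.Repeat("cd", 32)},
		},
		ByProducts:  map[string]interface{}{"return-value": float64(0)},
		Command:     []string{"tar", "czf", "foo.tar.gz", "foo.py"},
		Environment: map[string]interface{}{},
	}
	fmt.Println(link.Name)
}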
+*/ +func validateSupplyChainItem(item SupplyChainItem) error { + if item.Name == "" { + return fmt.Errorf("name cannot be empty") + } + + if err := validateSliceOfArtifactRules(item.ExpectedMaterials); err != nil { + return fmt.Errorf("invalid material rule: %s", err) + } + if err := validateSliceOfArtifactRules(item.ExpectedProducts); err != nil { + return fmt.Errorf("invalid product rule: %s", err) + } + return nil +} + +/* +Inspection represents an in-toto supply chain inspection, whose command in the +Run field is executed during final product verification, generating unsigned +link metadata. Materials and products used/produced by the inspection are +constrained by the artifact rules in the inspection's ExpectedMaterials and +ExpectedProducts fields. +*/ +type Inspection struct { + Type string `json:"_type"` + Run []string `json:"run"` + SupplyChainItem +} + +/* +validateInspection ensures that a passed inspection is valid and matches the +necessary format of an inspection. +*/ +func validateInspection(inspection Inspection) error { + if err := validateSupplyChainItem(inspection.SupplyChainItem); err != nil { + return fmt.Errorf("inspection %s", err.Error()) + } + if inspection.Type != "inspection" { + return fmt.Errorf("invalid Type value for inspection '%s': should be "+ + "'inspection'", inspection.SupplyChainItem.Name) + } + return nil +} + +/* +Step represents an in-toto step of the supply chain performed by a functionary. +During final product verification in-toto looks for corresponding Link +metadata, which is used as signed evidence that the step was performed +according to the supply chain definition. Materials and products used/produced +by the step are constrained by the artifact rules in the step's +ExpectedMaterials and ExpectedProducts fields. +*/ +type Step struct { + Type string `json:"_type"` + PubKeys []string `json:"pubkeys"` + CertificateConstraints []CertificateConstraint `json:"cert_constraints,omitempty"` + ExpectedCommand []string `json:"expected_command"` + Threshold int `json:"threshold"` + SupplyChainItem +} + +// CheckCertConstraints returns true if the provided certificate matches at least one +// of the constraints for this step. +func (s Step) CheckCertConstraints(key Key, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error { + if len(s.CertificateConstraints) == 0 { + return fmt.Errorf("no constraints found") + } + + _, possibleCert, err := decodeAndParse([]byte(key.KeyVal.Certificate)) + if err != nil { + return err + } + + cert, ok := possibleCert.(*x509.Certificate) + if !ok { + return fmt.Errorf("not a valid certificate") + } + + for _, constraint := range s.CertificateConstraints { + err = constraint.Check(cert, rootCAIDs, rootCertPool, intermediateCertPool) + if err == nil { + return nil + } + } + if err != nil { + return err + } + + // this should not be reachable since there is at least one constraint, and the for loop only saw err != nil + return fmt.Errorf("unknown certificate constraint error") +} + +/* +validateStep ensures that a passed step is valid and matches the +necessary format of an step. 
+*/ +func validateStep(step Step) error { + if err := validateSupplyChainItem(step.SupplyChainItem); err != nil { + return fmt.Errorf("step %s", err.Error()) + } + if step.Type != "step" { + return fmt.Errorf("invalid Type value for step '%s': should be 'step'", + step.SupplyChainItem.Name) + } + for _, keyID := range step.PubKeys { + if err := validateHexString(keyID); err != nil { + return err + } + } + return nil +} + +/* +ISO8601DateSchema defines the format string of a timestamp following the +ISO 8601 standard. +*/ +const ISO8601DateSchema = "2006-01-02T15:04:05Z" + +/* +Layout represents the definition of a software supply chain. It lists the +sequence of steps required in the software supply chain and the functionaries +authorized to perform these steps. Functionaries are identified by their +public keys. In addition, the layout may list a sequence of inspections that +are executed during in-toto supply chain verification. A layout should be +contained in a generic Metablock object, which provides functionality for +signing and signature verification, and reading from and writing to disk. +*/ +type Layout struct { + Type string `json:"_type"` + Steps []Step `json:"steps"` + Inspect []Inspection `json:"inspect"` + Keys map[string]Key `json:"keys"` + RootCas map[string]Key `json:"rootcas,omitempty"` + IntermediateCas map[string]Key `json:"intermediatecas,omitempty"` + Expires string `json:"expires"` + Readme string `json:"readme"` +} + +// Go does not allow to pass `[]T` (slice with certain type) to a function +// that accepts `[]interface{}` (slice with generic type) +// We have to manually create the interface slice first, see +// https://golang.org/doc/faq#convert_slice_of_interface +// TODO: Is there a better way to do polymorphism for steps and inspections? +func (l *Layout) stepsAsInterfaceSlice() []interface{} { + stepsI := make([]interface{}, len(l.Steps)) + for i, v := range l.Steps { + stepsI[i] = v + } + return stepsI +} +func (l *Layout) inspectAsInterfaceSlice() []interface{} { + inspectionsI := make([]interface{}, len(l.Inspect)) + for i, v := range l.Inspect { + inspectionsI[i] = v + } + return inspectionsI +} + +// RootCAIDs returns a slice of all of the Root CA IDs +func (l *Layout) RootCAIDs() []string { + rootCAIDs := make([]string, 0, len(l.RootCas)) + for rootCAID := range l.RootCas { + rootCAIDs = append(rootCAIDs, rootCAID) + } + return rootCAIDs +} + +func validateLayoutKeys(keys map[string]Key) error { + for keyID, key := range keys { + if key.KeyID != keyID { + return fmt.Errorf("invalid key found") + } + err := validatePublicKey(key) + if err != nil { + return err + } + } + + return nil +} + +/* +validateLayout is a function used to ensure that a passed item of type Layout +matches the necessary format. 
+*/ +func validateLayout(layout Layout) error { + if layout.Type != "layout" { + return fmt.Errorf("invalid Type value for layout: should be 'layout'") + } + + if _, err := time.Parse(ISO8601DateSchema, layout.Expires); err != nil { + return fmt.Errorf("expiry time parsed incorrectly - date either" + + " invalid or of incorrect format") + } + + if err := validateLayoutKeys(layout.Keys); err != nil { + return err + } + + if err := validateLayoutKeys(layout.RootCas); err != nil { + return err + } + + if err := validateLayoutKeys(layout.IntermediateCas); err != nil { + return err + } + + var namesSeen = make(map[string]bool) + for _, step := range layout.Steps { + if namesSeen[step.Name] { + return fmt.Errorf("non unique step or inspection name found") + } + + namesSeen[step.Name] = true + + if err := validateStep(step); err != nil { + return err + } + } + for _, inspection := range layout.Inspect { + if namesSeen[inspection.Name] { + return fmt.Errorf("non unique step or inspection name found") + } + + namesSeen[inspection.Name] = true + } + return nil +} + +/* +Metablock is a generic container for signable in-toto objects such as Layout +or Link. It has two fields, one that contains the signable object and one that +contains corresponding signatures. Metablock also provides functionality for +signing and signature verification, and reading from and writing to disk. +*/ +type Metablock struct { + // NOTE: Whenever we want to access an attribute of `Signed` we have to + // perform type assertion, e.g. `metablock.Signed.(Layout).Keys` + // Maybe there is a better way to store either Layouts or Links in `Signed`? + // The notary folks seem to have separate container structs: + // https://github.com/theupdateframework/notary/blob/master/tuf/data/root.go#L10-L14 + // https://github.com/theupdateframework/notary/blob/master/tuf/data/targets.go#L13-L17 + // I implemented it this way, because there will be several functions that + // receive or return a Metablock, where the type of Signed has to be inferred + // on runtime, e.g. when iterating over links for a layout, and a link can + // turn out to be a layout (sublayout) + Signed interface{} `json:"signed"` + Signatures []Signature `json:"signatures"` +} + +type jsonField struct { + name string + omitempty bool +} + +/* +checkRequiredJSONFields checks that the passed map (obj) has keys for each of +the json tags in the passed struct type (typ), and returns an error otherwise. +Any json tags that contain the "omitempty" option be allowed to be optional. +*/ +func checkRequiredJSONFields(obj map[string]interface{}, + typ reflect.Type) error { + + // Create list of json tags, e.g. `json:"_type"` + attributeCount := typ.NumField() + allFields := make([]jsonField, 0) + for i := 0; i < attributeCount; i++ { + fieldStr := typ.Field(i).Tag.Get("json") + field := jsonField{ + name: fieldStr, + omitempty: false, + } + + if idx := strings.Index(fieldStr, ","); idx != -1 { + field.name = fieldStr[:idx] + field.omitempty = strings.Contains(fieldStr[idx+1:], "omitempty") + } + + allFields = append(allFields, field) + } + + // Assert that there's a key in the passed map for each tag + for _, field := range allFields { + if _, ok := obj[field.name]; !ok && !field.omitempty { + return fmt.Errorf("required field %s missing", field.name) + } + } + return nil +} + +/* +Load parses JSON formatted metadata at the passed path into the Metablock +object on which it was called. 
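A minimal layout skeleton whose Expires field is formatted with the exported ISO8601DateSchema constant, so it survives the time.Parse check in validateLayout above:

package main

import (
	"fmt"
	"time"

	"github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	layout := in_toto.Layout{
		Type:    "layout",
		Expires: time.Now().AddDate(1, 0, 0).UTC().Format(in_toto.ISO8601DateSchema),
		Steps:   []in_toto.Step{},
		Inspect: []in_toto.Inspection{},
		Keys:    map[string]in_toto.Key{},
	}
	fmt.Println("layout expires:", layout.Expires)
}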
It returns an error if it cannot parse +a valid JSON formatted Metablock that contains a Link or Layout. +*/ +func (mb *Metablock) Load(path string) error { + // Open file and close before returning + jsonFile, err := os.Open(path) + if err != nil { + return err + } + defer jsonFile.Close() + + // Read entire file + jsonBytes, err := ioutil.ReadAll(jsonFile) + if err != nil { + return err + } + + // Unmarshal JSON into a map of raw messages (signed and signatures) + // We can't fully unmarshal immediately, because we need to inspect the + // type (link or layout) to decide which data structure to use + var rawMb map[string]*json.RawMessage + if err := json.Unmarshal(jsonBytes, &rawMb); err != nil { + return err + } + + // Error out on missing `signed` or `signatures` field or if + // one of them has a `null` value, which would lead to a nil pointer + // dereference in Unmarshal below. + if rawMb["signed"] == nil || rawMb["signatures"] == nil { + return fmt.Errorf("in-toto metadata requires 'signed' and" + + " 'signatures' parts") + } + + // Fully unmarshal signatures part + if err := json.Unmarshal(*rawMb["signatures"], &mb.Signatures); err != nil { + return err + } + + // Temporarily copy signed to opaque map to inspect the `_type` of signed + // and create link or layout accordingly + var signed map[string]interface{} + if err := json.Unmarshal(*rawMb["signed"], &signed); err != nil { + return err + } + + if signed["_type"] == "link" { + var link Link + if err := checkRequiredJSONFields(signed, reflect.TypeOf(link)); err != nil { + return err + } + + data, err := rawMb["signed"].MarshalJSON() + if err != nil { + return err + } + decoder := json.NewDecoder(strings.NewReader(string(data))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&link); err != nil { + return err + } + mb.Signed = link + + } else if signed["_type"] == "layout" { + var layout Layout + if err := checkRequiredJSONFields(signed, reflect.TypeOf(layout)); err != nil { + return err + } + + data, err := rawMb["signed"].MarshalJSON() + if err != nil { + return err + } + decoder := json.NewDecoder(strings.NewReader(string(data))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&layout); err != nil { + return err + } + + mb.Signed = layout + + } else { + return fmt.Errorf("the '_type' field of the 'signed' part of in-toto" + + " metadata must be one of 'link' or 'layout'") + } + + return jsonFile.Close() +} + +/* +Dump JSON serializes and writes the Metablock on which it was called to the +passed path. It returns an error if JSON serialization or writing fails. +*/ +func (mb *Metablock) Dump(path string) error { + // JSON encode Metablock formatted with newlines and indentation + // TODO: parametrize format + jsonBytes, err := json.MarshalIndent(mb, "", " ") + if err != nil { + return err + } + + // Write JSON bytes to the passed path with permissions (-rw-r--r--) + err = ioutil.WriteFile(path, jsonBytes, 0644) + if err != nil { + return err + } + + return nil +} + +/* +GetSignableRepresentation returns the canonical JSON representation of the +Signed field of the Metablock on which it was called. If canonicalization +fails the first return value is nil and the second return value is the error. +*/ +func (mb *Metablock) GetSignableRepresentation() ([]byte, error) { + return cjson.EncodeCanonical(mb.Signed) +} + +/* +VerifySignature verifies the first signature, corresponding to the passed Key, +that it finds in the Signatures field of the Metablock on which it was called. 
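A usage sketch for Load and Dump above (file names hypothetical): load, inspect the runtime type of Signed, and write a copy back out:

package main

import (
	"fmt"
	"log"

	"github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	var mb in_toto.Metablock
	if err := mb.Load("root.layout"); err != nil { // hypothetical file
		log.Fatal(err)
	}
	// Signed is an interface{}; a type switch reveals what was loaded.
	switch signed := mb.Signed.(type) {
	case in_toto.Layout:
		fmt.Println("layout with", len(signed.Steps), "steps, expires", signed.Expires)
	case in_toto.Link:
		fmt.Println("link for step", signed.Name)
	}
	if err := mb.Dump("root.layout.copy"); err != nil {
		log.Fatal(err)
	}
}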
+It returns an error if Signatures does not contain a Signature corresponding to
+the passed Key, the object in Signed cannot be canonicalized, or the Signature
+is invalid.
+*/
+func (mb *Metablock) VerifySignature(key Key) error {
+	sig, err := mb.GetSignatureForKeyID(key.KeyID)
+	if err != nil {
+		return err
+	}
+
+	dataCanonical, err := mb.GetSignableRepresentation()
+	if err != nil {
+		return err
+	}
+
+	if err := VerifySignature(key, sig, dataCanonical); err != nil {
+		return err
+	}
+	return nil
+}
+
+// GetSignatureForKeyID returns the signature that was created by the provided keyID, if it exists.
+func (mb *Metablock) GetSignatureForKeyID(keyID string) (Signature, error) {
+	for _, s := range mb.Signatures {
+		if s.KeyID == keyID {
+			return s, nil
+		}
+	}
+
+	return Signature{}, fmt.Errorf("no signature found for key '%s'", keyID)
+}
+
+/*
+ValidateMetablock ensures that a passed Metablock object is valid. It indirectly
+validates the Link or Layout that the Metablock object contains.
+*/
+func ValidateMetablock(mb Metablock) error {
+	switch mbSignedType := mb.Signed.(type) {
+	case Layout:
+		if err := validateLayout(mb.Signed.(Layout)); err != nil {
+			return err
+		}
+	case Link:
+		if err := validateLink(mb.Signed.(Link)); err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("unknown type '%s', should be 'layout' or 'link'",
+			mbSignedType)
+	}
+
+	if err := validateSliceOfSignatures(mb.Signatures); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+/*
+Sign creates a signature over the signed portion of the metablock using the Key
+object provided. It then appends the resulting signature to the Signatures
+field. It returns an error if the Signed object cannot be canonicalized, or if
+the key is invalid or not supported.
+*/
+func (mb *Metablock) Sign(key Key) error {
+
+	dataCanonical, err := mb.GetSignableRepresentation()
+	if err != nil {
+		return err
+	}
+
+	newSignature, err := GenerateSignature(dataCanonical, key)
+	if err != nil {
+		return err
+	}
+
+	mb.Signatures = append(mb.Signatures, newSignature)
+	return nil
+}
+
+// Subject describes the set of software artifacts the statement applies to.
+type Subject struct {
+	Name   string           `json:"name"`
+	Digest common.DigestSet `json:"digest"`
+}
+
+// StatementHeader defines the common fields for all statements
+type StatementHeader struct {
+	Type          string    `json:"_type"`
+	PredicateType string    `json:"predicateType"`
+	Subject       []Subject `json:"subject"`
+}
+
+/*
+Statement binds the attestation to a particular subject and identifies the
+type of the predicate. This struct represents a generic statement.
+*/
+type Statement struct {
+	StatementHeader
+	// Predicate contains type-specific metadata.
+	Predicate interface{} `json:"predicate"`
+}
+
+// ProvenanceStatementSLSA01 is the definition for an entire provenance statement with SLSA 0.1 predicate.
+type ProvenanceStatementSLSA01 struct {
+	StatementHeader
+	Predicate slsa01.ProvenancePredicate `json:"predicate"`
+}
+
+// ProvenanceStatementSLSA02 is the definition for an entire provenance statement with SLSA 0.2 predicate.
+type ProvenanceStatementSLSA02 struct {
+	StatementHeader
+	Predicate slsa02.ProvenancePredicate `json:"predicate"`
+}
+
+// ProvenanceStatement is the definition for an entire provenance statement with SLSA 0.2 predicate.
+// Deprecated: Only version-specific provenance structs will be maintained (ProvenanceStatementSLSA01, ProvenanceStatementSLSA02).
+type ProvenanceStatement struct {
+	StatementHeader
+	Predicate slsa02.ProvenancePredicate `json:"predicate"`
+}
+
+// LinkStatement is the definition for an entire link statement.
+type LinkStatement struct {
+	StatementHeader
+	Predicate Link `json:"predicate"`
+}
+
+/*
+SPDXStatement is the definition for an entire SPDX statement.
+This is currently not implemented. Some tooling exists here:
+https://github.com/spdx/tools-golang, but this software is still in an
+early state.
+This struct is the same as the generic Statement struct but is added for
+completeness.
+*/
+type SPDXStatement struct {
+	StatementHeader
+	Predicate interface{} `json:"predicate"`
+}
+
+/*
+CycloneDXStatement defines a cyclonedx sbom in the predicate. Like its SPDX
+counterpart, it is not currently serialized to a dedicated type; the
+predicate is an empty interface, like in the generic Statement.
+*/
+type CycloneDXStatement struct {
+	StatementHeader
+	Predicate interface{} `json:"predicate"`
+}
+
+/*
+DSSESigner provides signature generation and validation based on the SSL
+Signing Spec: https://github.com/secure-systems-lab/signing-spec
+as described by: https://github.com/MarkLodato/ITE/tree/media-type/ITE/5
+It wraps the generic SSL envelope signer and enforces the correct payload
+type both during signature generation and validation.
+*/
+type DSSESigner struct {
+	signer *dsse.EnvelopeSigner
+}
+
+func NewDSSESigner(p ...dsse.SignVerifier) (*DSSESigner, error) {
+	es, err := dsse.NewEnvelopeSigner(p...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &DSSESigner{
+		signer: es,
+	}, nil
+}
+
+func (s *DSSESigner) SignPayload(body []byte) (*dsse.Envelope, error) {
+	return s.signer.SignPayload(PayloadType, body)
+}
+
+func (s *DSSESigner) Verify(e *dsse.Envelope) error {
+	if e.PayloadType != PayloadType {
+		return ErrInvalidPayloadType
+	}
+
+	_, err := s.signer.Verify(e)
+	return err
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go
new file mode 100644
index 0000000000..1bba77c39e
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go
@@ -0,0 +1,131 @@
+package in_toto
+
+import (
+	"fmt"
+	"strings"
+)
+
+// An error message issued in UnpackRule if it receives a malformed rule.
+var errorMsg = "Wrong rule format, available formats are:\n" +
+	"\tMATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)" +
+	" [IN <destination-path-prefix>] FROM <step>,\n" +
+	"\tCREATE <pattern>,\n" +
+	"\tDELETE <pattern>,\n" +
+	"\tMODIFY <pattern>,\n" +
+	"\tALLOW <pattern>,\n" +
+	"\tDISALLOW <pattern>,\n" +
+	"\tREQUIRE <filename>\n\n"
+
+/*
+UnpackRule parses the passed rule and extracts and returns the information
+required for rule processing. It can be used to verify if a rule has a valid
+format. Available rule formats are:
+
+	MATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)
+	[IN <destination-path-prefix>] FROM <step>,
+	CREATE <pattern>,
+	DELETE <pattern>,
+	MODIFY <pattern>,
+	ALLOW <pattern>,
+	DISALLOW <pattern>
+
+Rule tokens are normalized to lower case before returning. The returned map
+has the following format:
+
+	{
+		"type": "match" | "create" | "delete" | "modify" | "allow" | "disallow",
+		"pattern": "<file name pattern>",
+		"srcPrefix": "<path or empty string>", // MATCH rule only
+		"dstPrefix": "<path or empty string>", // MATCH rule only
+		"dstType": "materials" | "products", // MATCH rule only
+		"dstName": "<step name>", // MATCH rule only
+	}
+
+If the rule does not match any of the available formats the first return value
+is nil and the second return value is the error.
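+
+For illustration only (this example is not part of the upstream documentation;
+the rule is hypothetical):
+
+	ruleData, err := UnpackRule([]string{"CREATE", "foo.tar.gz"})
+	// on success: ruleData["type"] == "create", ruleData["pattern"] == "foo.tar.gz"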
+*/
+func UnpackRule(rule []string) (map[string]string, error) {
+	// Cache rule len
+	ruleLen := len(rule)
+
+	// Create an all-lower copy of the rule to case-insensitively parse out
+	// tokens whose position we don't know yet. We keep the original rule to
+	// retain the non-token elements' case.
+	ruleLower := make([]string, ruleLen)
+	for i, val := range rule {
+		ruleLower[i] = strings.ToLower(val)
+	}
+
+	switch ruleLower[0] {
+	case "create", "modify", "delete", "allow", "disallow", "require":
+		if ruleLen != 2 {
+			return nil,
+				fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+		}
+
+		return map[string]string{
+			"type":    ruleLower[0],
+			"pattern": rule[1],
+		}, nil
+
+	case "match":
+		var srcPrefix string
+		var dstType string
+		var dstPrefix string
+		var dstName string
+
+		// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
+		// IN <destination-path-prefix> FROM <step>
+		if ruleLen == 10 && ruleLower[2] == "in" &&
+			ruleLower[4] == "with" && ruleLower[6] == "in" &&
+			ruleLower[8] == "from" {
+			srcPrefix = rule[3]
+			dstType = ruleLower[5]
+			dstPrefix = rule[7]
+			dstName = rule[9]
+			// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
+			// FROM <step>
+		} else if ruleLen == 8 && ruleLower[2] == "in" &&
+			ruleLower[4] == "with" && ruleLower[6] == "from" {
+			srcPrefix = rule[3]
+			dstType = ruleLower[5]
+			dstPrefix = ""
+			dstName = rule[7]
+
+			// MATCH <pattern> WITH (MATERIALS|PRODUCTS) IN <destination-path-prefix>
+			// FROM <step>
+		} else if ruleLen == 8 && ruleLower[2] == "with" &&
+			ruleLower[4] == "in" && ruleLower[6] == "from" {
+			srcPrefix = ""
+			dstType = ruleLower[3]
+			dstPrefix = rule[5]
+			dstName = rule[7]
+
+			// MATCH <pattern> WITH (MATERIALS|PRODUCTS) FROM <step>
+		} else if ruleLen == 6 && ruleLower[2] == "with" &&
+			ruleLower[4] == "from" {
+			srcPrefix = ""
+			dstType = ruleLower[3]
+			dstPrefix = ""
+			dstName = rule[5]
+
+		} else {
+			return nil,
+				fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+
+		}
+
+		return map[string]string{
+			"type":      ruleLower[0],
+			"pattern":   rule[1],
+			"srcPrefix": srcPrefix,
+			"dstPrefix": dstPrefix,
+			"dstType":   dstType,
+			"dstName":   dstName,
+		}, nil
+
+	default:
+		return nil,
+			fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
+	}
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go
new file mode 100644
index 0000000000..87e6905070
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go
@@ -0,0 +1,409 @@
+package in_toto
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"syscall"
+
+	"github.com/shibumi/go-pathspec"
+)
+
+// ErrSymCycle signals a detected symlink cycle in our RecordArtifacts() function.
+var ErrSymCycle = errors.New("symlink cycle detected")
+
+// ErrUnsupportedHashAlgorithm signals a missing hash mapping in getHashMapping
+var ErrUnsupportedHashAlgorithm = errors.New("unsupported hash algorithm detected")
+
+// ErrEmptyCommandArgs signals that RunCommand was called with an empty command slice.
+var ErrEmptyCommandArgs = errors.New("the command args are empty")
+
+// visitedSymlinks is a hashset that contains all paths that we have visited.
+var visitedSymlinks Set
+
+/*
+RecordArtifact reads and hashes the contents of the file at the passed path
+using the passed hash algorithms and returns a map in the following format:
+
+	{
+		"<path>": {
+			"sha256": <hex representation of hash>
+		}
+	}
+
+If reading the file fails, the first return value is nil and the second return
+value is the error.
+NOTE: For cross-platform consistency Windows-style line separators (CRLF) are
+normalized to Unix-style line separators (LF) before hashing file contents.
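+
+A minimal usage sketch (illustrative only; the file name is hypothetical):
+
+	artifact, err := RecordArtifact("foo.py", []string{"sha256"}, false)
+	// artifact["sha256"] holds the hex-encoded SHA-256 digest of foo.py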
+*/
+func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) {
+	supportedHashMappings := getHashMapping()
+	// Read file from passed path
+	contents, err := ioutil.ReadFile(path)
+	hashedContentsMap := make(map[string]interface{})
+	if err != nil {
+		return nil, err
+	}
+
+	if lineNormalization {
+		// "Normalize" file contents. We convert all line separators to '\n'
+		// for keeping operating system independence
+		contents = bytes.ReplaceAll(contents, []byte("\r\n"), []byte("\n"))
+		contents = bytes.ReplaceAll(contents, []byte("\r"), []byte("\n"))
+	}
+
+	// Create a map of all the hashes present in the hashAlgorithms list
+	for _, element := range hashAlgorithms {
+		if _, ok := supportedHashMappings[element]; !ok {
+			return nil, fmt.Errorf("%w: %s", ErrUnsupportedHashAlgorithm, element)
+		}
+		h := supportedHashMappings[element]
+		result := fmt.Sprintf("%x", hashToHex(h(), contents))
+		hashedContentsMap[element] = result
+	}
+
+	// Return it in a format that is conformant with link metadata artifacts
+	return hashedContentsMap, nil
+}
+
+/*
+RecordArtifacts is a wrapper around recordArtifacts.
+RecordArtifacts initializes a set for storing visited symlinks,
+calls recordArtifacts and deletes the set if no longer needed.
+recordArtifacts walks through the passed slice of paths, traversing
+subdirectories, and calls RecordArtifact for each file. It returns a map in
+the following format:
+
+	{
+		"<path>": {
+			"sha256": <hex representation of hash>
+		},
+		"<path>": {
+			"sha256": <hex representation of hash>
+		},
+		...
+	}
+
+If recording an artifact fails the first return value is nil and the second
+return value is the error.
+*/
+func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (evalArtifacts map[string]interface{}, err error) {
+	// Make sure to initialize a fresh hashset for every RecordArtifacts call
+	visitedSymlinks = NewSet()
+	evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	// pass result and error through
+	return evalArtifacts, err
+}
+
+/*
+recordArtifacts walks through the passed slice of paths, traversing
+subdirectories, and calls RecordArtifact for each file. It returns a map in
+the following format:
+
+	{
+		"<path>": {
+			"sha256": <hex representation of hash>
+		},
+		"<path>": {
+			"sha256": <hex representation of hash>
+		},
+		...
+	}
+
+If recording an artifact fails the first return value is nil and the second
+return value is the error.
+*/
+func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (map[string]interface{}, error) {
+	artifacts := make(map[string]interface{})
+	for _, path := range paths {
+		err := filepath.Walk(path,
+			func(path string, info os.FileInfo, err error) error {
+				// Abort if Walk function has a problem,
+				// e.g. path does not exist
+				if err != nil {
+					return err
+				}
+				// We need to call pathspec.GitIgnore inside of our filepath.Walk, because otherwise
+				// we will not catch all paths. Just imagine a path like "." and a pattern like "*.pub".
+				// If we called pathspec outside of filepath.Walk, it would not match.
+				ignore, err := pathspec.GitIgnore(gitignorePatterns, path)
+				if err != nil {
+					return err
+				}
+				if ignore {
+					return nil
+				}
+				// Don't hash directories
+				if info.IsDir() {
+					return nil
+				}
+
+				// check for symlink and evaluate the last element in a symlink
+				// chain via filepath.EvalSymlinks. We use EvalSymlinks here,
+				// because with os.Readlink() we would just read the next
+				// element in a possible symlink chain. This would mean more
+				// iterations. info.Mode()&os.ModeSymlink uses the file
+				// type bitmask to check for a symlink.
+				if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+					// return with error if we detect a symlink cycle
+					if ok := visitedSymlinks.Has(path); ok {
+						// this error will get passed through
+						// to RecordArtifacts()
+						return ErrSymCycle
+					}
+					evalSym, err := filepath.EvalSymlinks(path)
+					if err != nil {
+						return err
+					}
+					// add symlink to visitedSymlinks set
+					// this way, we know which link we have visited already
+					// if we visit a symlink twice, we have detected a symlink cycle
+					visitedSymlinks.Add(path)
+					// We recursively call recordArtifacts() to follow
+					// the new path.
+					evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+					if evalErr != nil {
+						return evalErr
+					}
+					for key, value := range evalArtifacts {
+						artifacts[key] = value
+					}
+					return nil
+				}
+				artifact, err := RecordArtifact(path, hashAlgorithms, lineNormalization)
+				// Abort if artifact can't be recorded, e.g.
+				// due to file permissions
+				if err != nil {
+					return err
+				}
+
+				for _, strip := range lStripPaths {
+					if strings.HasPrefix(path, strip) {
+						path = strings.TrimPrefix(path, strip)
+						break
+					}
+				}
+				// Check if path is unique
+				_, existingPath := artifacts[path]
+				if existingPath {
+					return fmt.Errorf("left stripping has resulted in non unique dictionary key: %s", path)
+				}
+				artifacts[path] = artifact
+				return nil
+			})
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return artifacts, nil
+}
+
+/*
+waitErrToExitCode converts an error returned by Cmd.Wait() to an exit code. It
+returns -1 if no exit code can be inferred.
+*/
+func waitErrToExitCode(err error) int {
+	// If there's no exit code, we return -1
+	retVal := -1
+
+	// See https://stackoverflow.com/questions/10385551/get-exit-code-go
+	if err != nil {
+		if exiterr, ok := err.(*exec.ExitError); ok {
+			// The program has exited with an exit code != 0
+			// This works on both Unix and Windows. Although package
+			// syscall is generally platform dependent, WaitStatus is
+			// defined for both Unix and Windows and in both cases has
+			// an ExitStatus() method with the same signature.
+			if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+				retVal = status.ExitStatus()
+			}
+		}
+	} else {
+		retVal = 0
+	}
+
+	return retVal
+}
+
+/*
+RunCommand executes the passed command in a subprocess. The first element of
+cmdArgs is used as executable and the rest as command arguments. It captures
+and returns stdout, stderr and exit code. The format of the returned map is:
+
+	{
+		"return-value": <exit code>,
+		"stdout": "<standard output>",
+		"stderr": "<standard error>"
+	}
+
+If the command cannot be executed or no pipes for stdout or stderr can be
+created the first return value is nil and the second return value is the error.
+NOTE: Since stdout and stderr are captured, they cannot be seen during the
+command execution.
+*/
+func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) {
+	if len(cmdArgs) == 0 {
+		return nil, ErrEmptyCommandArgs
+	}
+
+	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
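+	// NOTE (editorial): cmdArgs[0] is executed directly, without a shell, so
+	// shell features such as pipes or globs require invoking a shell
+	// explicitly, e.g. []string{"sh", "-c", "make all"} (illustrative).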
+
+	if runDir != "" {
+		cmd.Dir = runDir
+	}
+
+	stderrPipe, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+	stdoutPipe, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	if err := cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	// TODO: duplicate stdout, stderr
+	stdout, _ := ioutil.ReadAll(stdoutPipe)
+	stderr, _ := ioutil.ReadAll(stderrPipe)
+
+	retVal := waitErrToExitCode(cmd.Wait())
+
+	return map[string]interface{}{
+		"return-value": float64(retVal),
+		"stdout":       string(stdout),
+		"stderr":       string(stderr),
+	}, nil
+}
+
+/*
+InTotoRun executes commands, e.g. for software supply chain steps or
+inspections of an in-toto layout, and creates and returns corresponding link
+metadata. Link metadata contains recorded products at the passed productPaths
+and materials at the passed materialPaths. The returned link is wrapped in a
+Metablock object. If command execution or artifact recording fails the first
+return value is an empty Metablock and the second return value is the error.
+*/
+func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string,
+	cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string,
+	lStripPaths []string, lineNormalization bool) (Metablock, error) {
+	var linkMb Metablock
+
+	materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	// make sure that we only run RunCommand if cmdArgs is not nil or empty
+	byProducts := map[string]interface{}{}
+	if len(cmdArgs) != 0 {
+		byProducts, err = RunCommand(cmdArgs, runDir)
+		if err != nil {
+			return linkMb, err
+		}
+	}
+
+	products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	linkMb.Signed = Link{
+		Type:        "link",
+		Name:        name,
+		Materials:   materials,
+		Products:    products,
+		ByProducts:  byProducts,
+		Command:     cmdArgs,
+		Environment: map[string]interface{}{},
+	}
+
+	linkMb.Signatures = []Signature{}
+	// We use a Go 1.13 feature here to check the key struct: IsZero() returns
+	// true if the key hasn't been initialized with values other than the
+	// default ones.
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return linkMb, err
+		}
+	}
+
+	return linkMb, nil
+}
+
+/*
+InTotoRecordStart begins the creation of a link metablock file in two steps,
+in order to provide evidence for supply chain steps that cannot be carried out
+by a single command. InTotoRecordStart collects the hashes of the materials
+before any commands are run, signs the unfinished link, and returns the link.
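+
+A minimal two-step sketch (illustrative only; paths and the pre-loaded signing
+key are hypothetical):
+
+	prelimMb, err := InTotoRecordStart("build", []string{"src/"}, key,
+		[]string{"sha256"}, nil, nil, false)
+	// ... run the build out of band ...
+	finalMb, err := InTotoRecordStop(prelimMb, []string{"dist/"}, key,
+		[]string{"sha256"}, nil, nil, false)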
+*/
+func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) {
+	var linkMb Metablock
+	materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	linkMb.Signed = Link{
+		Type:        "link",
+		Name:        name,
+		Materials:   materials,
+		Products:    map[string]interface{}{},
+		ByProducts:  map[string]interface{}{},
+		Command:     []string{},
+		Environment: map[string]interface{}{},
+	}
+
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return linkMb, err
+		}
+	}
+
+	return linkMb, nil
+}
+
+/*
+InTotoRecordStop ends the creation of a metadata link file created by
+InTotoRecordStart. InTotoRecordStop takes in a signed unfinished link metablock
+created by InTotoRecordStart and records the hashes of any products created by
+commands run between InTotoRecordStart and InTotoRecordStop. The resulting
+finished link metablock is then signed by the provided key and returned.
+*/
+func InTotoRecordStop(prelimLinkMb Metablock, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) {
+	var linkMb Metablock
+	if err := prelimLinkMb.VerifySignature(key); err != nil {
+		return linkMb, err
+	}
+
+	link, ok := prelimLinkMb.Signed.(Link)
+	if !ok {
+		return linkMb, errors.New("invalid metadata block")
+	}
+
+	products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
+	if err != nil {
+		return linkMb, err
+	}
+
+	link.Products = products
+	linkMb.Signed = link
+
+	if !reflect.ValueOf(key).IsZero() {
+		if err := linkMb.Sign(key); err != nil {
+			return linkMb, err
+		}
+	}
+
+	return linkMb, nil
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go
new file mode 100644
index 0000000000..a45a454634
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go
@@ -0,0 +1,16 @@
+package common
+
+// DigestSet contains a set of digests. It is represented as a map from
+// algorithm name to lowercase hex-encoded value.
+type DigestSet map[string]string
+
+// ProvenanceBuilder identifies the entity that executed the build steps.
+type ProvenanceBuilder struct {
+	ID string `json:"id"`
+}
+
+// ProvenanceMaterial defines the materials used to build an artifact.
+type ProvenanceMaterial struct {
+	URI    string    `json:"uri,omitempty"`
+	Digest DigestSet `json:"digest,omitempty"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go
new file mode 100644
index 0000000000..5978e9229d
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go
@@ -0,0 +1,50 @@
+package v01
+
+import (
+	"time"
+
+	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+)
+
+const (
+	// PredicateSLSAProvenance represents a build provenance for an artifact.
+	PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.1"
+)
+
+// ProvenancePredicate is the provenance predicate definition.
+type ProvenancePredicate struct {
+	Builder   common.ProvenanceBuilder    `json:"builder"`
+	Recipe    ProvenanceRecipe            `json:"recipe"`
+	Metadata  *ProvenanceMetadata         `json:"metadata,omitempty"`
+	Materials []common.ProvenanceMaterial `json:"materials,omitempty"`
+}
+
+// ProvenanceRecipe describes the actions performed by the builder.
+type ProvenanceRecipe struct {
+	Type string `json:"type"`
+	// DefinedInMaterial can be sent as the null pointer to indicate that
+	// the value is not present.
+	DefinedInMaterial *int        `json:"definedInMaterial,omitempty"`
+	EntryPoint        string      `json:"entryPoint"`
+	Arguments         interface{} `json:"arguments,omitempty"`
+	Environment       interface{} `json:"environment,omitempty"`
+}
+
+// ProvenanceMetadata contains metadata for the built artifact.
+type ProvenanceMetadata struct {
+	// Use pointer to make sure that the absence of a time is not
+	// encoded as the Epoch time.
+	BuildStartedOn  *time.Time         `json:"buildStartedOn,omitempty"`
+	BuildFinishedOn *time.Time         `json:"buildFinishedOn,omitempty"`
+	Completeness    ProvenanceComplete `json:"completeness"`
+	Reproducible    bool               `json:"reproducible"`
+}
+
+// ProvenanceComplete indicates whether the claims in build/recipe are complete.
+// For in-depth information refer to the specification:
+// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
+type ProvenanceComplete struct {
+	Arguments   bool `json:"arguments"`
+	Environment bool `json:"environment"`
+	Materials   bool `json:"materials"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go
new file mode 100644
index 0000000000..5fca7abb73
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go
@@ -0,0 +1,137 @@
+package v02
+
+import (
+	"time"
+
+	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
+)
+
+const (
+	// PredicateSLSAProvenance represents a build provenance for an artifact.
+	PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.2"
+)
+
+// ProvenancePredicate is the provenance predicate definition.
+type ProvenancePredicate struct {
+	// Builder identifies the entity that executed the invocation, which is trusted to have
+	// correctly performed the operation and populated this provenance.
+	//
+	// The identity MUST reflect the trust base that consumers care about. How detailed to be is a
+	// judgement call. For example, GitHub Actions supports both GitHub-hosted runners and
+	// self-hosted runners. The GitHub-hosted runner might be a single identity because it’s all
+	// GitHub from the consumer’s perspective. Meanwhile, each self-hosted runner might have its
+	// own identity because not all runners are trusted by all consumers.
+	Builder common.ProvenanceBuilder `json:"builder"`
+
+	// BuildType is a URI indicating what type of build was performed. It determines the meaning of
+	// [Invocation], [BuildConfig] and [Materials].
+	BuildType string `json:"buildType"`
+
+	// Invocation identifies the event that kicked off the build. When combined with materials,
+	// this SHOULD fully describe the build, such that re-running this invocation results in
+	// bit-for-bit identical output (if the build is reproducible).
+	//
+	// MAY be unset/null if unknown, but this is DISCOURAGED.
+	Invocation ProvenanceInvocation `json:"invocation,omitempty"`
+
+	// BuildConfig lists the steps in the build.
If [ProvenanceInvocation.ConfigSource] is not + // available, BuildConfig can be used to verify information about the build. + // + // This is an arbitrary JSON object with a schema defined by [BuildType]. + BuildConfig interface{} `json:"buildConfig,omitempty"` + + // Metadata contains other properties of the build. + Metadata *ProvenanceMetadata `json:"metadata,omitempty"` + + // Materials is the collection of artifacts that influenced the build including sources, + // dependencies, build tools, base images, and so on. + // + // This is considered to be incomplete unless metadata.completeness.materials is true. + Materials []common.ProvenanceMaterial `json:"materials,omitempty"` +} + +// ProvenanceInvocation identifies the event that kicked off the build. +type ProvenanceInvocation struct { + // ConfigSource describes where the config file that kicked off the build came from. This is + // effectively a pointer to the source where [ProvenancePredicate.BuildConfig] came from. + ConfigSource ConfigSource `json:"configSource,omitempty"` + + // Parameters is a collection of all external inputs that influenced the build on top of + // ConfigSource. For example, if the invocation type were “make”, then this might be the + // flags passed to make aside from the target, which is captured in [ConfigSource.EntryPoint]. + // + // Consumers SHOULD accept only “safe” Parameters. The simplest and safest way to + // achieve this is to disallow any parameters altogether. + // + // This is an arbitrary JSON object with a schema defined by buildType. + Parameters interface{} `json:"parameters,omitempty"` + + // Environment contains any other builder-controlled inputs necessary for correctly evaluating + // the build. Usually only needed for reproducing the build but not evaluated as part of + // policy. + // + // This SHOULD be minimized to only include things that are part of the public API, that cannot + // be recomputed from other values in the provenance, and that actually affect the evaluation + // of the build. For example, this might include variables that are referenced in the workflow + // definition, but it SHOULD NOT include a dump of all environment variables or include things + // like the hostname (assuming hostname is not part of the public API). + Environment interface{} `json:"environment,omitempty"` +} + +type ConfigSource struct { + // URI indicating the identity of the source of the config. + URI string `json:"uri,omitempty"` + // Digest is a collection of cryptographic digests for the contents of the artifact specified + // by [URI]. + Digest common.DigestSet `json:"digest,omitempty"` + // EntryPoint identifying the entry point into the build. This is often a path to a + // configuration file and/or a target label within that file. The syntax and meaning are + // defined by buildType. For example, if the buildType were “make”, then this would reference + // the directory in which to run make as well as which target to use. + // + // Consumers SHOULD accept only specific [ProvenanceInvocation.EntryPoint] values. For example, + // a policy might only allow the "release" entry point but not the "debug" entry point. + // MAY be omitted if the buildType specifies a default value. + EntryPoint string `json:"entryPoint,omitempty"` +} + +// ProvenanceMetadata contains metadata for the built artifact. +type ProvenanceMetadata struct { + // BuildInvocationID identifies this particular build invocation, which can be useful for + // finding associated logs or other ad-hoc analysis. 
The exact meaning and format is defined
+	// by [common.ProvenanceBuilder.ID]; by default it is treated as opaque and case-sensitive.
+	// The value SHOULD be globally unique.
+	BuildInvocationID string `json:"buildInvocationID,omitempty"`
+
+	// BuildStartedOn is the timestamp of when the build started.
+	//
+	// Use pointer to make sure that the absence of a time is not
+	// encoded as the Epoch time.
+	BuildStartedOn *time.Time `json:"buildStartedOn,omitempty"`
+	// BuildFinishedOn is the timestamp of when the build completed.
+	BuildFinishedOn *time.Time `json:"buildFinishedOn,omitempty"`
+
+	// Completeness indicates that the builder claims certain fields in this message to be
+	// complete.
+	Completeness ProvenanceComplete `json:"completeness"`
+
+	// Reproducible if true, means the builder claims that running invocation on materials will
+	// produce bit-for-bit identical output.
+	Reproducible bool `json:"reproducible"`
+}
+
+// ProvenanceComplete indicates whether the claims in build/recipe are complete.
+// For in-depth information refer to the specification:
+// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
+type ProvenanceComplete struct {
+	// Parameters if true, means the builder claims that [ProvenanceInvocation.Parameters] is
+	// complete, meaning that all external inputs are properly captured in
+	// ProvenanceInvocation.Parameters.
+	Parameters bool `json:"parameters"`
+	// Environment if true, means the builder claims that [ProvenanceInvocation.Environment] is
+	// complete.
+	Environment bool `json:"environment"`
+	// Materials if true, means the builder claims that materials is complete, usually through some
+	// controls to prevent network access. Sometimes called “hermetic”.
+	Materials bool `json:"materials"`
+}
diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go
new file mode 100644
index 0000000000..59cba86eb5
--- /dev/null
+++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go
@@ -0,0 +1,147 @@
+package in_toto
+
+import (
+	"fmt"
+)
+
+/*
+Set represents a data structure for set operations. See `NewSet` for how to
+create a Set, and available Set receivers for useful set operations.
+
+Under the hood Set aliases map[string]struct{}, where the map keys are the set
+elements and the map values are a memory-efficient way of storing the keys.
+*/
+type Set map[string]struct{}
+
+/*
+NewSet creates a new Set, assigns it the optionally passed variadic string
+elements, and returns it.
+*/
+func NewSet(elems ...string) Set {
+	var s Set = make(map[string]struct{})
+	for _, elem := range elems {
+		s.Add(elem)
+	}
+	return s
+}
+
+/*
+Has returns True if the passed string is a member of the set on which it was
+called and False otherwise.
+*/
+func (s Set) Has(elem string) bool {
+	_, ok := s[elem]
+	return ok
+}
+
+/*
+Add adds the passed string to the set on which it was called, if the string is
+not a member of the set.
+*/
+func (s Set) Add(elem string) {
+	s[elem] = struct{}{}
+}
+
+/*
+Remove removes the passed string from the set on which it was called, if the
+string is a member of the set.
+*/
+func (s Set) Remove(elem string) {
+	delete(s, elem)
+}
+
+/*
+Intersection creates and returns a new Set with the elements of the set on
+which it was called that are also in the passed set.
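+
+For example (illustrative only):
+
+	common := NewSet("foo", "bar").Intersection(NewSet("bar", "baz"))
+	// common contains only "bar"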
+*/ +func (s Set) Intersection(s2 Set) Set { + res := NewSet() + for elem := range s { + if !s2.Has(elem) { + continue + } + res.Add(elem) + } + return res +} + +/* +Difference creates and returns a new Set with the elements of the set on +which it was called that are not in the passed set. +*/ +func (s Set) Difference(s2 Set) Set { + res := NewSet() + for elem := range s { + if s2.Has(elem) { + continue + } + res.Add(elem) + } + return res +} + +/* +Filter creates and returns a new Set with the elements of the set on which it +was called that match the passed pattern. A matching error is treated like a +non-match plus a warning is printed. +*/ +func (s Set) Filter(pattern string) Set { + res := NewSet() + for elem := range s { + matched, err := match(pattern, elem) + if err != nil { + fmt.Printf("WARNING: %s, pattern was '%s'\n", err, pattern) + continue + } + if !matched { + continue + } + res.Add(elem) + } + return res +} + +/* +Slice creates and returns an unordered string slice with the elements of the +set on which it was called. +*/ +func (s Set) Slice() []string { + var res []string + res = make([]string, 0, len(s)) + for elem := range s { + res = append(res, elem) + } + return res +} + +/* +InterfaceKeyStrings returns string keys of passed interface{} map in an +unordered string slice. +*/ +func InterfaceKeyStrings(m map[string]interface{}) []string { + res := make([]string, len(m)) + i := 0 + for k := range m { + res[i] = k + i++ + } + return res +} + +/* +IsSubSet checks if the parameter subset is a +subset of the superset s. +*/ +func (s Set) IsSubSet(subset Set) bool { + if len(subset) > len(s) { + return false + } + for key := range subset { + if s.Has(key) { + continue + } else { + return false + } + } + return true +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go new file mode 100644 index 0000000000..f555f79a52 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go @@ -0,0 +1,14 @@ +//go:build linux || darwin || !windows +// +build linux darwin !windows + +package in_toto + +import "golang.org/x/sys/unix" + +func isWritable(path string) error { + err := unix.Access(path, unix.W_OK) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go new file mode 100644 index 0000000000..8552f0345d --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go @@ -0,0 +1,25 @@ +package in_toto + +import ( + "errors" + "os" +) + +func isWritable(path string) error { + // get fileInfo + info, err := os.Stat(path) + if err != nil { + return err + } + + // check if path is a directory + if !info.IsDir() { + return errors.New("not a directory") + } + + // Check if the user bit is enabled in file permission + if info.Mode().Perm()&(1<<(uint(7))) == 0 { + return errors.New("not writable") + } + return nil +} diff --git a/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go new file mode 100644 index 0000000000..2302040f46 --- /dev/null +++ b/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go @@ -0,0 +1,1091 @@ +/* +Package in_toto implements types and routines to verify a software supply chain +according to the in-toto specification. 
+See https://github.com/in-toto/docs/blob/master/in-toto-spec.md
+*/
+package in_toto
+
+import (
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	osPath "path"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+)
+
+// ErrInspectionRunDirIsSymlink gets returned if the runDir is a symlink
+var ErrInspectionRunDirIsSymlink = errors.New("runDir is a symlink. This is a security risk")
+
+/*
+RunInspections iteratively executes the command in the Run field of all
+inspections of the passed layout, creating unsigned link metadata that records
+all files found in the current working directory as materials (before command
+execution) and products (after command execution). A map with inspection names
+as keys and Metablocks containing the generated link metadata as values is
+returned. The format is:
+
+	{
+		<inspection name>: Metablock,
+		<inspection name>: Metablock,
+		...
+	}
+
+If executing the inspection command fails, or if the executed command has a
+non-zero exit code, the first return value is an empty Metablock map and the
+second return value is the error.
+*/
+func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[string]Metablock, error) {
+	inspectionMetadata := make(map[string]Metablock)
+
+	for _, inspection := range layout.Inspect {
+
+		paths := []string{"."}
+		if runDir != "" {
+			paths = []string{runDir}
+		}
+
+		linkMb, err := InTotoRun(inspection.Name, runDir, paths, paths,
+			inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization)
+
+		if err != nil {
+			return nil, err
+		}
+
+		retVal := linkMb.Signed.(Link).ByProducts["return-value"]
+		if retVal != float64(0) {
+			return nil, fmt.Errorf("inspection command '%s' of inspection '%s'"+
+				" returned a non-zero value: %v", inspection.Run, inspection.Name,
+				retVal)
+		}
+
+		// Dump inspection link to cwd using the short link name format
+		linkName := fmt.Sprintf(LinkNameFormatShort, inspection.Name)
+		if err := linkMb.Dump(linkName); err != nil {
+			fmt.Printf("JSON serialization or writing failed: %s", err)
+		}
+
+		inspectionMetadata[inspection.Name] = linkMb
+	}
+	return inspectionMetadata, nil
+}
+
+// verifyMatchRule is a helper function to process artifact rules of
+// type MATCH. See VerifyArtifacts for more details.
+func verifyMatchRule(ruleData map[string]string,
+	srcArtifacts map[string]interface{}, srcArtifactQueue Set,
+	itemsMetadata map[string]Metablock) Set {
+	consumed := NewSet()
+	// Get destination link metadata
+	dstLinkMb, exists := itemsMetadata[ruleData["dstName"]]
+	if !exists {
+		// Destination link does not exist, rule can't consume any
+		// artifacts
+		return consumed
+	}
+
+	// Get artifacts from destination link metadata
+	var dstArtifacts map[string]interface{}
+	switch ruleData["dstType"] {
+	case "materials":
+		dstArtifacts = dstLinkMb.Signed.(Link).Materials
+	case "products":
+		dstArtifacts = dstLinkMb.Signed.(Link).Products
+	}
+
+	// cleanup paths in pattern and artifact maps
+	if ruleData["pattern"] != "" {
+		ruleData["pattern"] = path.Clean(ruleData["pattern"])
+	}
+	for k := range srcArtifacts {
+		if path.Clean(k) != k {
+			srcArtifacts[path.Clean(k)] = srcArtifacts[k]
+			delete(srcArtifacts, k)
+		}
+	}
+	for k := range dstArtifacts {
+		if path.Clean(k) != k {
+			dstArtifacts[path.Clean(k)] = dstArtifacts[k]
+			delete(dstArtifacts, k)
+		}
+	}
+
+	// Normalize optional source and destination prefixes, i.e.
if + // there is a prefix, then add a trailing slash if not there yet + for _, prefix := range []string{"srcPrefix", "dstPrefix"} { + if ruleData[prefix] != "" { + ruleData[prefix] = path.Clean(ruleData[prefix]) + if !strings.HasSuffix(ruleData[prefix], "/") { + ruleData[prefix] += "/" + } + } + } + // Iterate over queue and mark consumed artifacts + for srcPath := range srcArtifactQueue { + // Remove optional source prefix from source artifact path + // Noop if prefix is empty, or artifact does not have it + srcBasePath := strings.TrimPrefix(srcPath, ruleData["srcPrefix"]) + + // Ignore artifacts not matched by rule pattern + matched, err := match(ruleData["pattern"], srcBasePath) + if err != nil || !matched { + continue + } + + // Construct corresponding destination artifact path, i.e. + // an optional destination prefix plus the source base path + dstPath := path.Clean(osPath.Join(ruleData["dstPrefix"], srcBasePath)) + + // Try to find the corresponding destination artifact + dstArtifact, exists := dstArtifacts[dstPath] + // Ignore artifacts without corresponding destination artifact + if !exists { + continue + } + + // Ignore artifact pairs with no matching hashes + if !reflect.DeepEqual(srcArtifacts[srcPath], dstArtifact) { + continue + } + + // Only if a source and destination artifact pair was found and + // their hashes are equal, will we mark the source artifact as + // successfully consumed, i.e. it will be removed from the queue + consumed.Add(srcPath) + } + return consumed +} + +/* +VerifyArtifacts iteratively applies the material and product rules of the +passed items (step or inspection) to enforce and authorize artifacts (materials +or products) reported by the corresponding link and to guarantee that +artifacts are linked together across links. In the beginning all artifacts are +placed in a queue according to their type. If an artifact gets consumed by a +rule it is removed from the queue. An artifact can only be consumed once in +the course of processing the set of rules in ExpectedMaterials or +ExpectedProducts. + +Rules of type MATCH, ALLOW, CREATE, DELETE, MODIFY and DISALLOW are supported. + +All rules except for DISALLOW consume queued artifacts on success, and +leave the queue unchanged on failure. Hence, it is left to a terminal +DISALLOW rule to fail overall verification, if artifacts are left in the queue +that should have been consumed by preceding rules. 
+*/
+func VerifyArtifacts(items []interface{},
+	itemsMetadata map[string]Metablock) error {
+	// Verify artifact rules for each item in the layout
+	for _, itemI := range items {
+		// The layout item (interface) must be a Step or an Inspection; we are only
+		// interested in the name and the expected materials and products
+		var itemName string
+		var expectedMaterials [][]string
+		var expectedProducts [][]string
+
+		switch item := itemI.(type) {
+		case Step:
+			itemName = item.Name
+			expectedMaterials = item.ExpectedMaterials
+			expectedProducts = item.ExpectedProducts
+
+		case Inspection:
+			itemName = item.Name
+			expectedMaterials = item.ExpectedMaterials
+			expectedProducts = item.ExpectedProducts
+
+		default: // Something wrong
+			return fmt.Errorf("VerifyArtifacts received an item of invalid type,"+
+				" elements of passed slice 'items' must be one of 'Step' or"+
+				" 'Inspection', got: '%s'", reflect.TypeOf(item))
+		}
+
+		// Use the item's name to extract the corresponding link
+		srcLinkMb, exists := itemsMetadata[itemName]
+		if !exists {
+			return fmt.Errorf("VerifyArtifacts could not find metadata"+
+				" for item '%s', got: '%s'", itemName, itemsMetadata)
+		}
+
+		// Create shortcuts to materials and products (including hashes) reported
+		// by the item's link, required to verify "match" rules
+		materials := srcLinkMb.Signed.(Link).Materials
+		products := srcLinkMb.Signed.(Link).Products
+
+		// All other rules only require the material or product paths (without
+		// hashes). We extract them from the corresponding maps and store them as
+		// sets for convenience in further processing
+		materialPaths := NewSet()
+		for _, p := range InterfaceKeyStrings(materials) {
+			materialPaths.Add(path.Clean(p))
+		}
+		productPaths := NewSet()
+		for _, p := range InterfaceKeyStrings(products) {
+			productPaths.Add(path.Clean(p))
+		}
+
+		// For `create`, `delete` and `modify` rules we prepare sets of artifacts
+		// (without hashes) that were created, deleted or modified in the current
+		// step or inspection
+		created := productPaths.Difference(materialPaths)
+		deleted := materialPaths.Difference(productPaths)
+		remained := materialPaths.Intersection(productPaths)
+		modified := NewSet()
+		for name := range remained {
+			if !reflect.DeepEqual(materials[name], products[name]) {
+				modified.Add(name)
+			}
+		}
+
+		// For each item we have to run rule verification, once per artifact type.
+		// Here we prepare the corresponding data for each round.
+		verificationDataList := []map[string]interface{}{
+			{
+				"srcType":       "materials",
+				"rules":         expectedMaterials,
+				"artifacts":     materials,
+				"artifactPaths": materialPaths,
+			},
+			{
+				"srcType":       "products",
+				"rules":         expectedProducts,
+				"artifacts":     products,
+				"artifactPaths": productPaths,
+			},
+		}
+		// TODO: Add logging library (see in-toto/in-toto-golang#4)
+		// fmt.Printf("Verifying %s '%s' ", reflect.TypeOf(itemI), itemName)
+
+		// Process all material rules using the corresponding materials and all
+		// product rules using the corresponding products
+		for _, verificationData := range verificationDataList {
+			// TODO: Add logging library (see in-toto/in-toto-golang#4)
+			// fmt.Printf("%s...\n", verificationData["srcType"])
+
+			rules := verificationData["rules"].([][]string)
+			artifacts := verificationData["artifacts"].(map[string]interface{})
+
+			// Use artifacts (without hashes) as base queue. Each rule only operates
+			// on artifacts in that queue. If a rule consumes an artifact (i.e. can
+			// be applied successfully), the artifact is removed from the queue. By
+			// applying a DISALLOW rule eventually, verification may return an error,
+			// if the rule matches any artifacts in the queue that should have been
+			// consumed earlier.
+			queue := verificationData["artifactPaths"].(Set)
+
+			// TODO: Add logging library (see in-toto/in-toto-golang#4)
+			// fmt.Printf("Initial state\nMaterials: %s\nProducts: %s\nQueue: %s\n\n",
+			//	materialPaths.Slice(), productPaths.Slice(), queue.Slice())
+
+			// Verify rules sequentially
+			for _, rule := range rules {
+				// Parse rule and error out if it is malformed
+				// NOTE: the rule format should have been validated before
+				ruleData, err := UnpackRule(rule)
+				if err != nil {
+					return err
+				}
+
+				// Apply rule pattern to filter queued artifacts that are up for rule
+				// specific consumption
+				filtered := queue.Filter(path.Clean(ruleData["pattern"]))
+
+				var consumed Set
+				switch ruleData["type"] {
+				case "match":
+					// Note: here we need to perform more elaborate filtering
+					consumed = verifyMatchRule(ruleData, artifacts, queue, itemsMetadata)
+
+				case "allow":
+					// Consumes all filtered artifacts
+					consumed = filtered
+
+				case "create":
+					// Consumes filtered artifacts that were created
+					consumed = filtered.Intersection(created)
+
+				case "delete":
+					// Consumes filtered artifacts that were deleted
+					consumed = filtered.Intersection(deleted)
+
+				case "modify":
+					// Consumes filtered artifacts that were modified
+					consumed = filtered.Intersection(modified)
+
+				case "disallow":
+					// Does not consume but errors out if artifacts were filtered
+					if len(filtered) > 0 {
+						return fmt.Errorf("artifact verification failed for %s '%s',"+
+							" %s %s disallowed by rule %s",
+							reflect.TypeOf(itemI).Name(), itemName,
+							verificationData["srcType"], filtered.Slice(), rule)
+					}
+				case "require":
+					// REQUIRE is somewhat of a weird animal that does not use
+					// patterns but rather single filenames (for now).
+					if !queue.Has(ruleData["pattern"]) {
+						return fmt.Errorf("artifact verification failed for %s in REQUIRE '%s',"+
+							" because %s is not in %s", verificationData["srcType"],
+							ruleData["pattern"], ruleData["pattern"], queue.Slice())
+					}
+				}
+				// Update queue by removing consumed artifacts
+				queue = queue.Difference(consumed)
+				// TODO: Add logging library (see in-toto/in-toto-golang#4)
+				// fmt.Printf("Rule: %s\nQueue: %s\n\n", rule, queue.Slice())
+			}
+		}
+	}
+	return nil
+}
+
+/*
+ReduceStepsMetadata merges for each step of the passed Layout all the passed
+per-functionary links into a single link, asserting that the reported Materials
+and Products are equal across links for a given step. This function may be
+used at a point during the overall verification where link thresholds have
+already been verified and subsequent verification only needs one exemplary
+link per step. The function returns a map with one Metablock (link) per step:
+
+	{
+		<step name>: Metablock,
+		<step name>: Metablock,
+		...
+	}
+
+If links corresponding to the same step report different Materials or different
+Products, the first return value is an empty Metablock map and the second
+return value is the error.
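+
+A minimal call sketch (illustrative only; the step name is hypothetical):
+
+	reduced, err := ReduceStepsMetadata(layout, stepsMetadata)
+	// on success, reduced["build"] is one representative link for step "build"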
+*/
+func ReduceStepsMetadata(layout Layout,
+	stepsMetadata map[string]map[string]Metablock) (map[string]Metablock,
+	error) {
+	stepsMetadataReduced := make(map[string]Metablock)
+
+	for _, step := range layout.Steps {
+		linksPerStep, ok := stepsMetadata[step.Name]
+		// We should never get here, layout verification must fail earlier
+		if !ok || len(linksPerStep) < 1 {
+			panic("Could not reduce metadata for step '" + step.Name +
+				"', no link metadata found.")
+		}
+
+		// Get the first link (could be any link) for the current step, which will
+		// serve as reference link for below comparisons
+		var referenceKeyID string
+		var referenceLinkMb Metablock
+		for keyID, linkMb := range linksPerStep {
+			referenceLinkMb = linkMb
+			referenceKeyID = keyID
+			break
+		}
+
+		// Only one link, nothing to reduce, take the reference link
+		if len(linksPerStep) == 1 {
+			stepsMetadataReduced[step.Name] = referenceLinkMb
+
+			// Multiple links, reduce but first check
+		} else {
+			// Artifact maps must be equal for each type among all links
+			// TODO: What should we do if there are more links than the
+			// threshold requires, but not all of them are equal? Right now we would
+			// also error.
+			for keyID, linkMb := range linksPerStep {
+				if !reflect.DeepEqual(linkMb.Signed.(Link).Materials,
+					referenceLinkMb.Signed.(Link).Materials) ||
+					!reflect.DeepEqual(linkMb.Signed.(Link).Products,
+						referenceLinkMb.Signed.(Link).Products) {
+					return nil, fmt.Errorf("link '%s' and '%s' have different"+
+						" artifacts",
+						fmt.Sprintf(LinkNameFormat, step.Name, referenceKeyID),
+						fmt.Sprintf(LinkNameFormat, step.Name, keyID))
+				}
+			}
+			// We haven't errored out, so we can reduce (i.e. take the reference link)
+			stepsMetadataReduced[step.Name] = referenceLinkMb
+		}
+	}
+	return stepsMetadataReduced, nil
+}
+
+/*
+VerifyStepCommandAlignment (soft) verifies that for each step of the passed
+layout the command executed, as per the passed link, matches the expected
+command, as per the layout. Soft verification means that, in case a command
+does not align, a warning is issued.
+*/
+func VerifyStepCommandAlignment(layout Layout,
+	stepsMetadata map[string]map[string]Metablock) {
+	for _, step := range layout.Steps {
+		linksPerStep, ok := stepsMetadata[step.Name]
+		// We should never get here, layout verification must fail earlier
+		if !ok || len(linksPerStep) < 1 {
+			panic("Could not verify command alignment for step '" + step.Name +
+				"', no link metadata found.")
+		}
+
+		for signerKeyID, linkMb := range linksPerStep {
+			expectedCommandS := strings.Join(step.ExpectedCommand, " ")
+			executedCommandS := strings.Join(linkMb.Signed.(Link).Command, " ")
+
+			if expectedCommandS != executedCommandS {
+				linkName := fmt.Sprintf(LinkNameFormat, step.Name, signerKeyID)
+				fmt.Printf("WARNING: Expected command for step '%s' (%s) and command"+
+					" reported by '%s' (%s) differ.\n",
+					step.Name, expectedCommandS, linkName, executedCommandS)
+			}
+		}
+	}
+}
+
+/*
+LoadLayoutCertificates loads the root and intermediate CAs from the layout, if
+present. These will be used to check signatures that were used to sign links
+but are not configured in the PubKeys section of the step. If no CAs are
+configured in the layout, such signatures are not allowed, and the returned
+CertPools will be empty.
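+
+A minimal call sketch (illustrative only):
+
+	rootPool, intermediatePool, err := LoadLayoutCertificates(layout, nil)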
+*/
+func LoadLayoutCertificates(layout Layout, intermediatePems [][]byte) (*x509.CertPool, *x509.CertPool, error) {
+	rootPool := x509.NewCertPool()
+	for _, certPem := range layout.RootCas {
+		ok := rootPool.AppendCertsFromPEM([]byte(certPem.KeyVal.Certificate))
+		if !ok {
+			return nil, nil, fmt.Errorf("failed to load root certificates for layout")
+		}
+	}
+
+	intermediatePool := x509.NewCertPool()
+	for _, intermediatePem := range layout.IntermediateCas {
+		ok := intermediatePool.AppendCertsFromPEM([]byte(intermediatePem.KeyVal.Certificate))
+		if !ok {
+			return nil, nil, fmt.Errorf("failed to load intermediate certificates for layout")
+		}
+	}
+
+	for _, intermediatePem := range intermediatePems {
+		ok := intermediatePool.AppendCertsFromPEM(intermediatePem)
+		if !ok {
+			return nil, nil, fmt.Errorf("failed to load provided intermediate certificates")
+		}
+	}
+
+	return rootPool, intermediatePool, nil
+}
+
+/*
+VerifyLinkSignatureThesholds verifies that for each step of the passed layout,
+there are at least Threshold links, validly signed by different authorized
+functionaries. The returned map of link metadata per step contains only
+links with valid signatures from distinct functionaries and has the format:
+
+	{
+		<step name>: {
+			<functionary key id>: Metablock,
+			<functionary key id>: Metablock,
+			...
+		},
+		<step name>: {
+			<functionary key id>: Metablock,
+			<functionary key id>: Metablock,
+			...
+		},
+		...
+	}
+
+If for any step of the layout there are not enough links available, the first
+return value is an empty map of Metablock maps and the second return value is
+the error.
+*/
+func VerifyLinkSignatureThesholds(layout Layout,
+	stepsMetadata map[string]map[string]Metablock, rootCertPool, intermediateCertPool *x509.CertPool) (
+	map[string]map[string]Metablock, error) {
+	// This will store links with a valid signature from an authorized functionary
+	// for all steps
+	stepsMetadataVerified := make(map[string]map[string]Metablock)
+
+	// Try to find enough (>= threshold) links each with a valid signature from
+	// distinct authorized functionaries for each step
+	for _, step := range layout.Steps {
+		var stepErr error
+
+		// This will store links with a valid signature from an authorized
+		// functionary for the given step
+		linksPerStepVerified := make(map[string]Metablock)
+
+		// Check if there are any links at all for the given step
+		linksPerStep, ok := stepsMetadata[step.Name]
+		if !ok || len(linksPerStep) < 1 {
+			stepErr = fmt.Errorf("no links found")
+		}
+
+		// For each link corresponding to a step, check that the signer key was
+		// authorized, the layout contains a verification key and the signature
+		// verification passes. Only good links are stored, to verify thresholds
+		// below.
+		isAuthorizedSignature := false
+		for signerKeyID, linkMb := range linksPerStep {
+			for _, authorizedKeyID := range step.PubKeys {
+				if signerKeyID == authorizedKeyID {
+					if verifierKey, ok := layout.Keys[authorizedKeyID]; ok {
+						if err := linkMb.VerifySignature(verifierKey); err == nil {
+							linksPerStepVerified[signerKeyID] = linkMb
+							isAuthorizedSignature = true
+							break
+						}
+					}
+				}
+			}
+
+			// If the signer's key wasn't in our step's pubkeys array, check the cert pool to
+			// see if the key is known to us.
+			if !isAuthorizedSignature {
+				sig, err := linkMb.GetSignatureForKeyID(signerKeyID)
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				cert, err := sig.GetCertificate()
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				// test certificate against the step's constraints to make sure it's a valid functionary
+				err = step.CheckCertConstraints(cert, layout.RootCAIDs(), rootCertPool, intermediateCertPool)
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				err = linkMb.VerifySignature(cert)
+				if err != nil {
+					stepErr = err
+					continue
+				}
+
+				linksPerStepVerified[signerKeyID] = linkMb
+			}
+		}
+
+		// Store all good links for a step
+		stepsMetadataVerified[step.Name] = linksPerStepVerified
+
+		if len(linksPerStepVerified) < step.Threshold {
+			linksPerStep := stepsMetadata[step.Name]
+			return nil, fmt.Errorf("step '%s' requires '%d' link metadata file(s)."+
+				" '%d' out of '%d' available link(s) have a valid signature from an"+
+				" authorized signer: %v", step.Name, step.Threshold,
+				len(linksPerStepVerified), len(linksPerStep), stepErr)
+		}
+	}
+	return stepsMetadataVerified, nil
+}
+
+/*
+LoadLinksForLayout loads for every Step of the passed Layout a Metablock
+containing the corresponding Link. A base path to a directory that contains
+the links may be passed using linkDir. Link file names are constructed
+using LinkNameFormat together with the corresponding step name and authorized
+functionary key ids. A map of link metadata is returned and has the following
+format:
+
+	{
+		<step name>: {
+			<functionary key id>: Metablock,
+			<functionary key id>: Metablock,
+			...
+		},
+		<step name>: {
+			<functionary key id>: Metablock,
+			<functionary key id>: Metablock,
+			...
+		},
+		...
+	}
+
+If a link cannot be loaded at a constructed link name or is invalid, it is
+ignored. Only a preliminary threshold check is performed, that is, if there
+aren't at least Threshold links for any given step, the first return value
+is an empty map of Metablock maps and the second return value is the error.
+*/
+func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metablock, error) {
+	stepsMetadata := make(map[string]map[string]Metablock)
+
+	for _, step := range layout.Steps {
+		linksPerStep := make(map[string]Metablock)
+		// Since we can verify against certificates belonging to a CA, we need to
+		// load any possible links
+		linkFiles, err := filepath.Glob(osPath.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name)))
+		if err != nil {
+			return nil, err
+		}
+
+		for _, linkPath := range linkFiles {
+			var linkMb Metablock
+			if err := linkMb.Load(linkPath); err != nil {
+				continue
+			}
+
+			// To get the full key from the metadata's signatures, we have to check
+			// for one with the same short id...
+			signerShortKeyID := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(linkPath), step.Name+"."), ".link")
+			for _, sig := range linkMb.Signatures {
+				if strings.HasPrefix(sig.KeyID, signerShortKeyID) {
+					linksPerStep[sig.KeyID] = linkMb
+					break
+				}
+			}
+		}
+
+		if len(linksPerStep) < step.Threshold {
+			return nil, fmt.Errorf("step '%s' requires '%d' link metadata file(s),"+
+				" found '%d'", step.Name, step.Threshold, len(linksPerStep))
+		}
+
+		stepsMetadata[step.Name] = linksPerStep
+	}
+
+	return stepsMetadata, nil
+}
+
+/*
+VerifyLayoutExpiration verifies that the passed Layout has not expired. It
+returns an error if the (Zulu) date in the Expires field is in the past.
+*/
+func VerifyLayoutExpiration(layout Layout) error {
+	expires, err := time.Parse(ISO8601DateSchema, layout.Expires)
+	if err != nil {
+		return err
+	}
+	// Uses timezone of expires, i.e.
UTC + if time.Until(expires) < 0 { + return fmt.Errorf("layout has expired on '%s'", expires) + } + return nil +} + +/* +VerifyLayoutSignatures verifies for each key in the passed key map the +corresponding signature of the Layout in the passed Metablock's Signed field. +Signatures and keys are associated by key id. If the key map is empty, or the +Metablock's Signature field does not have a signature for one or more of the +passed keys, or a matching signature is invalid, an error is returned. +*/ +func VerifyLayoutSignatures(layoutMb Metablock, + layoutKeys map[string]Key) error { + if len(layoutKeys) < 1 { + return fmt.Errorf("layout verification requires at least one key") + } + + for _, key := range layoutKeys { + if err := layoutMb.VerifySignature(key); err != nil { + return err + } + } + return nil +} + +/* +GetSummaryLink merges the materials of the first step (as mentioned in the +layout) and the products of the last step and returns a new link. This link +reports the materials and products and summarizes the overall software supply +chain. +NOTE: The assumption is that the steps mentioned in the layout are to be +performed sequentially. So, the first step mentioned in the layout denotes what +comes into the supply chain and the last step denotes what goes out. +*/ +func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metablock, + stepName string) (Metablock, error) { + var summaryLink Link + var result Metablock + if len(layout.Steps) > 0 { + firstStepLink := stepsMetadataReduced[layout.Steps[0].Name] + lastStepLink := stepsMetadataReduced[layout.Steps[len(layout.Steps)-1].Name] + + summaryLink.Materials = firstStepLink.Signed.(Link).Materials + summaryLink.Name = stepName + summaryLink.Type = firstStepLink.Signed.(Link).Type + + summaryLink.Products = lastStepLink.Signed.(Link).Products + summaryLink.ByProducts = lastStepLink.Signed.(Link).ByProducts + // Using the last command of the sublayout as the command + // of the summary link can be misleading. Is it necessary to + // include all the commands executed as part of sublayout? + summaryLink.Command = lastStepLink.Signed.(Link).Command + } + + result.Signed = summaryLink + + return result, nil +} + +/* +VerifySublayouts checks if any step in the supply chain is a sublayout, and if +so, recursively resolves it and replaces it with a summary link summarizing the +steps carried out in the sublayout. 
+*/ +func VerifySublayouts(layout Layout, + stepsMetadataVerified map[string]map[string]Metablock, + superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metablock, error) { + for stepName, linkData := range stepsMetadataVerified { + for keyID, metadata := range linkData { + if _, ok := metadata.Signed.(Layout); ok { + layoutKeys := make(map[string]Key) + layoutKeys[keyID] = layout.Keys[keyID] + + sublayoutLinkDir := fmt.Sprintf(SublayoutLinkDirFormat, + stepName, keyID) + sublayoutLinkPath := filepath.Join(superLayoutLinkPath, + sublayoutLinkDir) + summaryLink, err := InTotoVerify(metadata, layoutKeys, + sublayoutLinkPath, stepName, make(map[string]string), intermediatePems, lineNormalization) + if err != nil { + return nil, err + } + linkData[keyID] = summaryLink + } + + } + } + return stepsMetadataVerified, nil +} + +// TODO: find a better way than two helper functions for the replacer op + +func substituteParamatersInSlice(replacer *strings.Replacer, slice []string) []string { + newSlice := make([]string, 0) + for _, item := range slice { + newSlice = append(newSlice, replacer.Replace(item)) + } + return newSlice +} + +func substituteParametersInSliceOfSlices(replacer *strings.Replacer, + slice [][]string) [][]string { + newSlice := make([][]string, 0) + for _, item := range slice { + newSlice = append(newSlice, substituteParamatersInSlice(replacer, + item)) + } + return newSlice +} + +/* +SubstituteParameters performs parameter substitution in steps and inspections +in the following fields: +- Expected Materials and Expected Products of both +- Run of inspections +- Expected Command of steps +The substitution marker is '{}' and the keyword within the braces is replaced +by a value found in the substitution map passed, parameterDictionary. The +layout with parameters substituted is returned to the calling function. +*/ +func SubstituteParameters(layout Layout, + parameterDictionary map[string]string) (Layout, error) { + + if len(parameterDictionary) == 0 { + return layout, nil + } + + parameters := make([]string, 0) + + re := regexp.MustCompile("^[a-zA-Z0-9_-]+$") + + for parameter, value := range parameterDictionary { + parameterFormatCheck := re.MatchString(parameter) + if !parameterFormatCheck { + return layout, fmt.Errorf("invalid format for parameter") + } + + parameters = append(parameters, "{"+parameter+"}") + parameters = append(parameters, value) + } + + replacer := strings.NewReplacer(parameters...) + + for i := range layout.Steps { + layout.Steps[i].ExpectedMaterials = substituteParametersInSliceOfSlices( + replacer, layout.Steps[i].ExpectedMaterials) + layout.Steps[i].ExpectedProducts = substituteParametersInSliceOfSlices( + replacer, layout.Steps[i].ExpectedProducts) + layout.Steps[i].ExpectedCommand = substituteParamatersInSlice(replacer, + layout.Steps[i].ExpectedCommand) + } + + for i := range layout.Inspect { + layout.Inspect[i].ExpectedMaterials = + substituteParametersInSliceOfSlices(replacer, + layout.Inspect[i].ExpectedMaterials) + layout.Inspect[i].ExpectedProducts = + substituteParametersInSliceOfSlices(replacer, + layout.Inspect[i].ExpectedProducts) + layout.Inspect[i].Run = substituteParamatersInSlice(replacer, + layout.Inspect[i].Run) + } + + return layout, nil +} + +/* +InTotoVerify can be used to verify an entire software supply chain according to +the in-toto specification. 
It requires the metadata of the root layout, a map +that contains public keys to verify the root layout signatures, a path to a +directory from where it can load link metadata files, which are treated as +signed evidence for the steps defined in the layout, a step name, and a +paramater dictionary used for parameter substitution. The step name only +matters for sublayouts, where it's important to associate the summary of that +step with a unique name. The verification routine is as follows: + +1. Verify layout signature(s) using passed key(s) +2. Verify layout expiration date +3. Substitute parameters in layout +4. Load link metadata files for steps of layout +5. Verify signatures and signature thresholds for steps of layout +6. Verify sublayouts recursively +7. Verify command alignment for steps of layout (only warns) +8. Verify artifact rules for steps of layout +9. Execute inspection commands (generates link metadata for each inspection) +10. Verify artifact rules for inspections of layout + +InTotoVerify returns a summary link wrapped in a Metablock object and an error +value. If any of the verification routines fail, verification is aborted and +error is returned. In such an instance, the first value remains an empty +Metablock object. + +NOTE: Artifact rules of type "create", "modify" +and "delete" are currently not supported. +*/ +func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, + linkDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( + Metablock, error) { + + var summaryLink Metablock + var err error + + // Verify root signatures + if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil { + return summaryLink, err + } + + // Extract the layout from its Metablock container (for further processing) + layout := layoutMb.Signed.(Layout) + + // Verify layout expiration + if err := VerifyLayoutExpiration(layout); err != nil { + return summaryLink, err + } + + // Substitute parameters in layout + layout, err = SubstituteParameters(layout, parameterDictionary) + if err != nil { + return summaryLink, err + } + + rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems) + if err != nil { + return summaryLink, err + } + + // Load links for layout + stepsMetadata, err := LoadLinksForLayout(layout, linkDir) + if err != nil { + return summaryLink, err + } + + // Verify link signatures + stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout, + stepsMetadata, rootCertPool, intermediateCertPool) + if err != nil { + return summaryLink, err + } + + // Verify and resolve sublayouts + stepsSublayoutVerified, err := VerifySublayouts(layout, + stepsMetadataVerified, linkDir, intermediatePems, lineNormalization) + if err != nil { + return summaryLink, err + } + + // Verify command alignment (WARNING only) + VerifyStepCommandAlignment(layout, stepsSublayoutVerified) + + // Given that signature thresholds have been checked above and the rest of + // the relevant link properties, i.e. materials and products, have to be + // exactly equal, we can reduce the map of steps metadata. However, we error + // if the relevant properties are not equal among links of a step. 
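With per-link verification done, the function is about to collapse the per-functionary link maps into a single link per step. For orientation, the whole routine documented above is typically driven by a caller along these lines; this is a sketch in which the file names, key scheme, and step name are assumptions for illustration, and the import path is the upstream in-toto-golang module rather than this vendored copy:

```go
package main

import (
	"fmt"

	in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	// Load the signed root layout (path is hypothetical).
	var layoutMb in_toto.Metablock
	if err := layoutMb.Load("root.layout"); err != nil {
		panic(err)
	}

	// Keys trusted to verify the layout signature, indexed by key id.
	var alice in_toto.Key
	if err := alice.LoadKey("alice.pub", "rsassa-pss-sha256",
		[]string{"sha256"}); err != nil {
		panic(err)
	}
	layoutKeys := map[string]in_toto.Key{alice.KeyID: alice}

	// Steps 1-10 above all run inside this one call: link metadata is
	// read from ./links, no parameters are substituted, no extra
	// intermediate certificates are supplied, and line normalization
	// is disabled.
	summary, err := in_toto.InTotoVerify(layoutMb, layoutKeys, "./links",
		"summary", map[string]string{}, nil, false)
	if err != nil {
		panic(err)
	}
	fmt.Println("supply chain verified; summary link:",
		summary.Signed.(in_toto.Link).Name)
}
```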
+ stepsMetadataReduced, err := ReduceStepsMetadata(layout, + stepsSublayoutVerified) + if err != nil { + return summaryLink, err + } + + // Verify artifact rules + if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), + stepsMetadataReduced); err != nil { + return summaryLink, err + } + + inspectionMetadata, err := RunInspections(layout, "", lineNormalization) + if err != nil { + return summaryLink, err + } + + // Add steps metadata to inspection metadata, because inspection artifact + // rules may also refer to artifacts reported by step links + for k, v := range stepsMetadataReduced { + inspectionMetadata[k] = v + } + + if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), + inspectionMetadata); err != nil { + return summaryLink, err + } + + summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + if err != nil { + return summaryLink, err + } + + return summaryLink, nil +} + +/* +InTotoVerifyWithDirectory provides the same functionality as IntotoVerify, but +adds the possibility to select a local directory from where the inspections are run. +*/ +func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, + linkDir string, runDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( + Metablock, error) { + + var summaryLink Metablock + var err error + + // runDir sanity checks + // check if path exists + info, err := os.Stat(runDir) + if err != nil { + return Metablock{}, err + } + + // check if runDir is a symlink + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + return Metablock{}, ErrInspectionRunDirIsSymlink + } + + // check if runDir is writable and a directory + err = isWritable(runDir) + if err != nil { + return Metablock{}, err + } + + // check if runDir is empty (we do not want to overwrite files) + // We abuse File.Readdirnames for this action. 
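The emptiness probe implemented next leans on a small Go idiom: `File.Readdirnames(1)` requests at most one entry and returns `io.EOF` exactly when the directory has none, so a single directory read answers the question. Extracted into a standalone helper (a sketch; `isEmptyDir` is not a function from this diff):

```go
package fsutil

import (
	"io"
	"os"
)

// isEmptyDir reports whether dir has no entries. Readdirnames(1) asks
// for at most one name, so only a single directory entry is touched.
func isEmptyDir(dir string) (bool, error) {
	f, err := os.Open(dir)
	if err != nil {
		return false, err
	}
	defer f.Close()
	if _, err := f.Readdirnames(1); err == io.EOF {
		return true, nil // io.EOF: the directory holds no entries
	} else if err != nil {
		return false, err
	}
	return false, nil // a name came back, so at least one entry exists
}
```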
+ f, err := os.Open(runDir) + if err != nil { + return Metablock{}, err + } + defer f.Close() + // We use Readdirnames(1) for performance reasons, one child node + // is enough to proof that the directory is not empty + _, err = f.Readdirnames(1) + // if io.EOF gets returned as error the directory is empty + if err == io.EOF { + return Metablock{}, err + } + err = f.Close() + if err != nil { + return Metablock{}, err + } + + // Verify root signatures + if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil { + return summaryLink, err + } + + // Extract the layout from its Metablock container (for further processing) + layout := layoutMb.Signed.(Layout) + + // Verify layout expiration + if err := VerifyLayoutExpiration(layout); err != nil { + return summaryLink, err + } + + // Substitute parameters in layout + layout, err = SubstituteParameters(layout, parameterDictionary) + if err != nil { + return summaryLink, err + } + + rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems) + if err != nil { + return summaryLink, err + } + + // Load links for layout + stepsMetadata, err := LoadLinksForLayout(layout, linkDir) + if err != nil { + return summaryLink, err + } + + // Verify link signatures + stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout, + stepsMetadata, rootCertPool, intermediateCertPool) + if err != nil { + return summaryLink, err + } + + // Verify and resolve sublayouts + stepsSublayoutVerified, err := VerifySublayouts(layout, + stepsMetadataVerified, linkDir, intermediatePems, lineNormalization) + if err != nil { + return summaryLink, err + } + + // Verify command alignment (WARNING only) + VerifyStepCommandAlignment(layout, stepsSublayoutVerified) + + // Given that signature thresholds have been checked above and the rest of + // the relevant link properties, i.e. materials and products, have to be + // exactly equal, we can reduce the map of steps metadata. However, we error + // if the relevant properties are not equal among links of a step. 
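From here the function mirrors `InTotoVerify` step for step; the only caller-visible difference is the extra `runDir` argument selecting the pre-validated scratch directory for inspections. Continuing the earlier caller sketch (paths hypothetical):

```go
// Same verification pipeline, but inspection commands execute inside
// /tmp/in-toto-scratch instead of the process working directory.
summary, err := in_toto.InTotoVerifyWithDirectory(layoutMb, layoutKeys,
	"./links", "/tmp/in-toto-scratch", "summary", nil, nil, false)
if err != nil {
	panic(err)
}
_ = summary
```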
+ stepsMetadataReduced, err := ReduceStepsMetadata(layout, + stepsSublayoutVerified) + if err != nil { + return summaryLink, err + } + + // Verify artifact rules + if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), + stepsMetadataReduced); err != nil { + return summaryLink, err + } + + inspectionMetadata, err := RunInspections(layout, runDir, lineNormalization) + if err != nil { + return summaryLink, err + } + + // Add steps metadata to inspection metadata, because inspection artifact + // rules may also refer to artifacts reported by step links + for k, v := range stepsMetadataReduced { + inspectionMetadata[k] = v + } + + if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), + inspectionMetadata); err != nil { + return summaryLink, err + } + + summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + if err != nil { + return summaryLink, err + } + + return summaryLink, nil +} diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go index 939f2c2ca7..2567a0d970 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go +++ b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go @@ -6,12 +6,14 @@ package moby_buildkit_v1 import ( context "context" fmt "fmt" + rpc "github.com/gogo/googleapis/google/rpc" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" _ "github.com/golang/protobuf/ptypes/timestamp" types "github.com/moby/buildkit/api/types" pb "github.com/moby/buildkit/solver/pb" + pb1 "github.com/moby/buildkit/sourcepolicy/pb" github_com_moby_buildkit_util_entitlements "github.com/moby/buildkit/util/entitlements" github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" grpc "google.golang.org/grpc" @@ -35,6 +37,34 @@ var _ = time.Kitchen // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type BuildHistoryEventType int32 + +const ( + BuildHistoryEventType_STARTED BuildHistoryEventType = 0 + BuildHistoryEventType_COMPLETE BuildHistoryEventType = 1 + BuildHistoryEventType_DELETED BuildHistoryEventType = 2 +) + +var BuildHistoryEventType_name = map[int32]string{ + 0: "STARTED", + 1: "COMPLETE", + 2: "DELETED", +} + +var BuildHistoryEventType_value = map[string]int32{ + "STARTED": 0, + "COMPLETE": 1, + "DELETED": 2, +} + +func (x BuildHistoryEventType) String() string { + return proto.EnumName(BuildHistoryEventType_name, int32(x)) +} + +func (BuildHistoryEventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{0} +} + type PruneRequest struct { Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` All bool `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` @@ -347,6 +377,8 @@ type SolveRequest struct { Cache CacheOptions `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache"` Entitlements []github_com_moby_buildkit_util_entitlements.Entitlement `protobuf:"bytes,9,rep,name=Entitlements,proto3,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"` FrontendInputs map[string]*pb.Definition `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Internal bool `protobuf:"varint,11,opt,name=Internal,proto3" json:"Internal,omitempty"` + SourcePolicy *pb1.Policy `protobuf:"bytes,12,opt,name=SourcePolicy,proto3" json:"SourcePolicy,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -448,6 +480,20 @@ func (m *SolveRequest) GetFrontendInputs() map[string]*pb.Definition { return nil } +func (m *SolveRequest) GetInternal() bool { + if m != nil { + return m.Internal + } + return false +} + +func (m *SolveRequest) GetSourcePolicy() *pb1.Policy { + if m != nil { + return m.SourcePolicy + } + return nil +} + type CacheOptions struct { // ExportRefDeprecated is deprecated in favor or the new Exports since BuildKit v0.4.0. 
// When ExportRefDeprecated is set, the solver appends @@ -1240,7 +1286,663 @@ func (m *ListWorkersResponse) GetRecord() []*types.WorkerRecord { return nil } +type InfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InfoRequest) Reset() { *m = InfoRequest{} } +func (m *InfoRequest) String() string { return proto.CompactTextString(m) } +func (*InfoRequest) ProtoMessage() {} +func (*InfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{17} +} +func (m *InfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoRequest.Merge(m, src) +} +func (m *InfoRequest) XXX_Size() int { + return m.Size() +} +func (m *InfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoRequest proto.InternalMessageInfo + +type InfoResponse struct { + BuildkitVersion *types.BuildkitVersion `protobuf:"bytes,1,opt,name=buildkitVersion,proto3" json:"buildkitVersion,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InfoResponse) Reset() { *m = InfoResponse{} } +func (m *InfoResponse) String() string { return proto.CompactTextString(m) } +func (*InfoResponse) ProtoMessage() {} +func (*InfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{18} +} +func (m *InfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InfoResponse.Merge(m, src) +} +func (m *InfoResponse) XXX_Size() int { + return m.Size() +} +func (m *InfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InfoResponse proto.InternalMessageInfo + +func (m *InfoResponse) GetBuildkitVersion() *types.BuildkitVersion { + if m != nil { + return m.BuildkitVersion + } + return nil +} + +type BuildHistoryRequest struct { + ActiveOnly bool `protobuf:"varint,1,opt,name=ActiveOnly,proto3" json:"ActiveOnly,omitempty"` + Ref string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"` + EarlyExit bool `protobuf:"varint,3,opt,name=EarlyExit,proto3" json:"EarlyExit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BuildHistoryRequest) Reset() { *m = BuildHistoryRequest{} } +func (m *BuildHistoryRequest) String() string { return proto.CompactTextString(m) } +func (*BuildHistoryRequest) ProtoMessage() {} +func (*BuildHistoryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{19} +} +func (m *BuildHistoryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildHistoryRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuildHistoryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BuildHistoryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildHistoryRequest.Merge(m, src) +} +func (m *BuildHistoryRequest) XXX_Size() int { + return m.Size() +} +func (m *BuildHistoryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BuildHistoryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildHistoryRequest proto.InternalMessageInfo + +func (m *BuildHistoryRequest) GetActiveOnly() bool { + if m != nil { + return m.ActiveOnly + } + return false +} + +func (m *BuildHistoryRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *BuildHistoryRequest) GetEarlyExit() bool { + if m != nil { + return m.EarlyExit + } + return false +} + +type BuildHistoryEvent struct { + Type BuildHistoryEventType `protobuf:"varint,1,opt,name=type,proto3,enum=moby.buildkit.v1.BuildHistoryEventType" json:"type,omitempty"` + Record *BuildHistoryRecord `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BuildHistoryEvent) Reset() { *m = BuildHistoryEvent{} } +func (m *BuildHistoryEvent) String() string { return proto.CompactTextString(m) } +func (*BuildHistoryEvent) ProtoMessage() {} +func (*BuildHistoryEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{20} +} +func (m *BuildHistoryEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildHistoryEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuildHistoryEvent.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BuildHistoryEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildHistoryEvent.Merge(m, src) +} +func (m *BuildHistoryEvent) XXX_Size() int { + return m.Size() +} +func (m *BuildHistoryEvent) XXX_DiscardUnknown() { + xxx_messageInfo_BuildHistoryEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildHistoryEvent proto.InternalMessageInfo + +func (m *BuildHistoryEvent) GetType() BuildHistoryEventType { + if m != nil { + return m.Type + } + return BuildHistoryEventType_STARTED +} + +func (m *BuildHistoryEvent) GetRecord() *BuildHistoryRecord { + if m != nil { + return m.Record + } + return nil +} + +type BuildHistoryRecord struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` + FrontendAttrs map[string]string `protobuf:"bytes,3,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Exporters []*Exporter `protobuf:"bytes,4,rep,name=Exporters,proto3" json:"Exporters,omitempty"` + Error *rpc.Status `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + CreatedAt *time.Time `protobuf:"bytes,6,opt,name=CreatedAt,proto3,stdtime" json:"CreatedAt,omitempty"` + CompletedAt *time.Time `protobuf:"bytes,7,opt,name=CompletedAt,proto3,stdtime" json:"CompletedAt,omitempty"` + Logs *Descriptor 
`protobuf:"bytes,8,opt,name=logs,proto3" json:"logs,omitempty"` + ExporterResponse map[string]string `protobuf:"bytes,9,rep,name=ExporterResponse,proto3" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Result *BuildResultInfo `protobuf:"bytes,10,opt,name=Result,proto3" json:"Result,omitempty"` + Results map[string]*BuildResultInfo `protobuf:"bytes,11,rep,name=Results,proto3" json:"Results,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Generation int32 `protobuf:"varint,12,opt,name=Generation,proto3" json:"Generation,omitempty"` + Trace *Descriptor `protobuf:"bytes,13,opt,name=trace,proto3" json:"trace,omitempty"` + Pinned bool `protobuf:"varint,14,opt,name=pinned,proto3" json:"pinned,omitempty"` + NumCachedSteps int32 `protobuf:"varint,15,opt,name=numCachedSteps,proto3" json:"numCachedSteps,omitempty"` + NumTotalSteps int32 `protobuf:"varint,16,opt,name=numTotalSteps,proto3" json:"numTotalSteps,omitempty"` + NumCompletedSteps int32 `protobuf:"varint,17,opt,name=numCompletedSteps,proto3" json:"numCompletedSteps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BuildHistoryRecord) Reset() { *m = BuildHistoryRecord{} } +func (m *BuildHistoryRecord) String() string { return proto.CompactTextString(m) } +func (*BuildHistoryRecord) ProtoMessage() {} +func (*BuildHistoryRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{21} +} +func (m *BuildHistoryRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildHistoryRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuildHistoryRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BuildHistoryRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildHistoryRecord.Merge(m, src) +} +func (m *BuildHistoryRecord) XXX_Size() int { + return m.Size() +} +func (m *BuildHistoryRecord) XXX_DiscardUnknown() { + xxx_messageInfo_BuildHistoryRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildHistoryRecord proto.InternalMessageInfo + +func (m *BuildHistoryRecord) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *BuildHistoryRecord) GetFrontend() string { + if m != nil { + return m.Frontend + } + return "" +} + +func (m *BuildHistoryRecord) GetFrontendAttrs() map[string]string { + if m != nil { + return m.FrontendAttrs + } + return nil +} + +func (m *BuildHistoryRecord) GetExporters() []*Exporter { + if m != nil { + return m.Exporters + } + return nil +} + +func (m *BuildHistoryRecord) GetError() *rpc.Status { + if m != nil { + return m.Error + } + return nil +} + +func (m *BuildHistoryRecord) GetCreatedAt() *time.Time { + if m != nil { + return m.CreatedAt + } + return nil +} + +func (m *BuildHistoryRecord) GetCompletedAt() *time.Time { + if m != nil { + return m.CompletedAt + } + return nil +} + +func (m *BuildHistoryRecord) GetLogs() *Descriptor { + if m != nil { + return m.Logs + } + return nil +} + +func (m *BuildHistoryRecord) GetExporterResponse() map[string]string { + if m != nil { + return m.ExporterResponse + } + return nil +} + +func (m *BuildHistoryRecord) GetResult() *BuildResultInfo { + if m != nil { + return m.Result + } + return nil +} + +func (m 
*BuildHistoryRecord) GetResults() map[string]*BuildResultInfo { + if m != nil { + return m.Results + } + return nil +} + +func (m *BuildHistoryRecord) GetGeneration() int32 { + if m != nil { + return m.Generation + } + return 0 +} + +func (m *BuildHistoryRecord) GetTrace() *Descriptor { + if m != nil { + return m.Trace + } + return nil +} + +func (m *BuildHistoryRecord) GetPinned() bool { + if m != nil { + return m.Pinned + } + return false +} + +func (m *BuildHistoryRecord) GetNumCachedSteps() int32 { + if m != nil { + return m.NumCachedSteps + } + return 0 +} + +func (m *BuildHistoryRecord) GetNumTotalSteps() int32 { + if m != nil { + return m.NumTotalSteps + } + return 0 +} + +func (m *BuildHistoryRecord) GetNumCompletedSteps() int32 { + if m != nil { + return m.NumCompletedSteps + } + return 0 +} + +type UpdateBuildHistoryRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Pinned bool `protobuf:"varint,2,opt,name=Pinned,proto3" json:"Pinned,omitempty"` + Delete bool `protobuf:"varint,3,opt,name=Delete,proto3" json:"Delete,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateBuildHistoryRequest) Reset() { *m = UpdateBuildHistoryRequest{} } +func (m *UpdateBuildHistoryRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateBuildHistoryRequest) ProtoMessage() {} +func (*UpdateBuildHistoryRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{22} +} +func (m *UpdateBuildHistoryRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateBuildHistoryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateBuildHistoryRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateBuildHistoryRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateBuildHistoryRequest.Merge(m, src) +} +func (m *UpdateBuildHistoryRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateBuildHistoryRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateBuildHistoryRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateBuildHistoryRequest proto.InternalMessageInfo + +func (m *UpdateBuildHistoryRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +func (m *UpdateBuildHistoryRequest) GetPinned() bool { + if m != nil { + return m.Pinned + } + return false +} + +func (m *UpdateBuildHistoryRequest) GetDelete() bool { + if m != nil { + return m.Delete + } + return false +} + +type UpdateBuildHistoryResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateBuildHistoryResponse) Reset() { *m = UpdateBuildHistoryResponse{} } +func (m *UpdateBuildHistoryResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateBuildHistoryResponse) ProtoMessage() {} +func (*UpdateBuildHistoryResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{23} +} +func (m *UpdateBuildHistoryResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateBuildHistoryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateBuildHistoryResponse.Marshal(b, m, deterministic) + } else { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateBuildHistoryResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateBuildHistoryResponse.Merge(m, src) +} +func (m *UpdateBuildHistoryResponse) XXX_Size() int { + return m.Size() +} +func (m *UpdateBuildHistoryResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateBuildHistoryResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateBuildHistoryResponse proto.InternalMessageInfo + +type Descriptor struct { + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Descriptor) Reset() { *m = Descriptor{} } +func (m *Descriptor) String() string { return proto.CompactTextString(m) } +func (*Descriptor) ProtoMessage() {} +func (*Descriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{24} +} +func (m *Descriptor) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Descriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Descriptor.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Descriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_Descriptor.Merge(m, src) +} +func (m *Descriptor) XXX_Size() int { + return m.Size() +} +func (m *Descriptor) XXX_DiscardUnknown() { + xxx_messageInfo_Descriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_Descriptor proto.InternalMessageInfo + +func (m *Descriptor) GetMediaType() string { + if m != nil { + return m.MediaType + } + return "" +} + +func (m *Descriptor) GetSize_() int64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *Descriptor) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +type BuildResultInfo struct { + Result *Descriptor `protobuf:"bytes,1,opt,name=Result,proto3" json:"Result,omitempty"` + Attestations []*Descriptor `protobuf:"bytes,2,rep,name=Attestations,proto3" json:"Attestations,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BuildResultInfo) Reset() { *m = BuildResultInfo{} } +func (m *BuildResultInfo) String() string { return proto.CompactTextString(m) } +func (*BuildResultInfo) ProtoMessage() {} +func (*BuildResultInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{25} +} +func (m *BuildResultInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildResultInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuildResultInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} 
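A practical consequence of all this generated boilerplate is that every message exposes nil-safe getters, so consumer code can read records without guarding each field. A sketch (the `summarize` helper is hypothetical; `controlapi` is the conventional alias for this package in moby/buildkit code):

```go
package history

import (
	"fmt"

	controlapi "github.com/moby/buildkit/api/services/control"
)

// summarize renders a one-line progress view of a history record. The
// generated getters return zero values on nil receivers, so this is
// safe even for a nil or partially populated record.
func summarize(rec *controlapi.BuildHistoryRecord) string {
	return fmt.Sprintf("%s: %d/%d steps done (%d cached), pinned=%v",
		rec.GetRef(),
		rec.GetNumCompletedSteps(),
		rec.GetNumTotalSteps(),
		rec.GetNumCachedSteps(),
		rec.GetPinned())
}
```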
+func (m *BuildResultInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildResultInfo.Merge(m, src) +} +func (m *BuildResultInfo) XXX_Size() int { + return m.Size() +} +func (m *BuildResultInfo) XXX_DiscardUnknown() { + xxx_messageInfo_BuildResultInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildResultInfo proto.InternalMessageInfo + +func (m *BuildResultInfo) GetResult() *Descriptor { + if m != nil { + return m.Result + } + return nil +} + +func (m *BuildResultInfo) GetAttestations() []*Descriptor { + if m != nil { + return m.Attestations + } + return nil +} + +type Exporter struct { + Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` + Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Exporter) Reset() { *m = Exporter{} } +func (m *Exporter) String() string { return proto.CompactTextString(m) } +func (*Exporter) ProtoMessage() {} +func (*Exporter) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{26} +} +func (m *Exporter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exporter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exporter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exporter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exporter.Merge(m, src) +} +func (m *Exporter) XXX_Size() int { + return m.Size() +} +func (m *Exporter) XXX_DiscardUnknown() { + xxx_messageInfo_Exporter.DiscardUnknown(m) +} + +var xxx_messageInfo_Exporter proto.InternalMessageInfo + +func (m *Exporter) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Exporter) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + func init() { + proto.RegisterEnum("moby.buildkit.v1.BuildHistoryEventType", BuildHistoryEventType_name, BuildHistoryEventType_value) proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest") proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest") proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse") @@ -1264,109 +1966,169 @@ func init() { proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage") proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest") proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse") + proto.RegisterType((*InfoRequest)(nil), "moby.buildkit.v1.InfoRequest") + proto.RegisterType((*InfoResponse)(nil), "moby.buildkit.v1.InfoResponse") + proto.RegisterType((*BuildHistoryRequest)(nil), "moby.buildkit.v1.BuildHistoryRequest") + proto.RegisterType((*BuildHistoryEvent)(nil), "moby.buildkit.v1.BuildHistoryEvent") + proto.RegisterType((*BuildHistoryRecord)(nil), "moby.buildkit.v1.BuildHistoryRecord") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.BuildHistoryRecord.ExporterResponseEntry") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.BuildHistoryRecord.FrontendAttrsEntry") + proto.RegisterMapType((map[string]*BuildResultInfo)(nil), "moby.buildkit.v1.BuildHistoryRecord.ResultsEntry") + 
proto.RegisterType((*UpdateBuildHistoryRequest)(nil), "moby.buildkit.v1.UpdateBuildHistoryRequest") + proto.RegisterType((*UpdateBuildHistoryResponse)(nil), "moby.buildkit.v1.UpdateBuildHistoryResponse") + proto.RegisterType((*Descriptor)(nil), "moby.buildkit.v1.Descriptor") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Descriptor.AnnotationsEntry") + proto.RegisterType((*BuildResultInfo)(nil), "moby.buildkit.v1.BuildResultInfo") + proto.RegisterType((*Exporter)(nil), "moby.buildkit.v1.Exporter") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Exporter.AttrsEntry") } func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) } var fileDescriptor_0c5120591600887d = []byte{ - // 1543 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xef, 0xda, 0xf1, 0xd7, 0x8b, 0x13, 0xa5, 0xd3, 0x52, 0xad, 0x16, 0x91, 0xa4, 0xdb, 0x22, - 0x45, 0x55, 0xbb, 0x4e, 0x03, 0x85, 0x12, 0x3e, 0xd4, 0x3a, 0x2e, 0x34, 0x55, 0x23, 0xca, 0xa4, - 0xa5, 0x52, 0x0f, 0x48, 0x6b, 0x7b, 0xbc, 0x59, 0x65, 0xbd, 0xb3, 0xcc, 0xcc, 0xa6, 0x35, 0x7f, - 0x00, 0x67, 0x6e, 0xfc, 0x01, 0x1c, 0x38, 0x71, 0xe6, 0x2f, 0x40, 0xea, 0x91, 0x73, 0x0f, 0x01, - 0xf5, 0x0e, 0xe2, 0xc8, 0x11, 0xcd, 0xc7, 0x3a, 0xeb, 0xd8, 0xce, 0x57, 0x39, 0x79, 0xde, 0xcc, - 0x7b, 0xbf, 0x7d, 0x9f, 0x33, 0xef, 0x19, 0xe6, 0x3a, 0x34, 0x16, 0x8c, 0x46, 0x5e, 0xc2, 0xa8, - 0xa0, 0x68, 0xa1, 0x4f, 0xdb, 0x03, 0xaf, 0x9d, 0x86, 0x51, 0x77, 0x37, 0x14, 0xde, 0xde, 0x4d, - 0xe7, 0x46, 0x10, 0x8a, 0x9d, 0xb4, 0xed, 0x75, 0x68, 0xbf, 0x11, 0xd0, 0x80, 0x36, 0x14, 0x63, - 0x3b, 0xed, 0x29, 0x4a, 0x11, 0x6a, 0xa5, 0x01, 0x9c, 0xa5, 0x80, 0xd2, 0x20, 0x22, 0x07, 0x5c, - 0x22, 0xec, 0x13, 0x2e, 0xfc, 0x7e, 0x62, 0x18, 0xae, 0xe7, 0xf0, 0xe4, 0xc7, 0x1a, 0xd9, 0xc7, - 0x1a, 0x9c, 0x46, 0x7b, 0x84, 0x35, 0x92, 0x76, 0x83, 0x26, 0xdc, 0x70, 0x37, 0xa6, 0x72, 0xfb, - 0x49, 0xd8, 0x10, 0x83, 0x84, 0xf0, 0xc6, 0x73, 0xca, 0x76, 0x09, 0xd3, 0x02, 0xee, 0xf7, 0x16, - 0xd4, 0x1f, 0xb1, 0x34, 0x26, 0x98, 0x7c, 0x9b, 0x12, 0x2e, 0xd0, 0x25, 0x28, 0xf7, 0xc2, 0x48, - 0x10, 0x66, 0x5b, 0xcb, 0xc5, 0x95, 0x1a, 0x36, 0x14, 0x5a, 0x80, 0xa2, 0x1f, 0x45, 0x76, 0x61, - 0xd9, 0x5a, 0xa9, 0x62, 0xb9, 0x44, 0x2b, 0x50, 0xdf, 0x25, 0x24, 0x69, 0xa5, 0xcc, 0x17, 0x21, - 0x8d, 0xed, 0xe2, 0xb2, 0xb5, 0x52, 0x6c, 0xce, 0xbc, 0xdc, 0x5f, 0xb2, 0xf0, 0xc8, 0x09, 0x72, - 0xa1, 0x26, 0xe9, 0xe6, 0x40, 0x10, 0x6e, 0xcf, 0xe4, 0xd8, 0x0e, 0xb6, 0xdd, 0x6b, 0xb0, 0xd0, - 0x0a, 0xf9, 0xee, 0x13, 0xee, 0x07, 0xc7, 0xe9, 0xe2, 0x3e, 0x80, 0xf3, 0x39, 0x5e, 0x9e, 0xd0, - 0x98, 0x13, 0x74, 0x0b, 0xca, 0x8c, 0x74, 0x28, 0xeb, 0x2a, 0xe6, 0xd9, 0xb5, 0x77, 0xbc, 0xc3, - 0xb1, 0xf1, 0x8c, 0x80, 0x64, 0xc2, 0x86, 0xd9, 0xfd, 0xb1, 0x08, 0xb3, 0xb9, 0x7d, 0x34, 0x0f, - 0x85, 0xcd, 0x96, 0x6d, 0x2d, 0x5b, 0x2b, 0x35, 0x5c, 0xd8, 0x6c, 0x21, 0x1b, 0x2a, 0x5b, 0xa9, - 0xf0, 0xdb, 0x11, 0x31, 0xb6, 0x67, 0x24, 0xba, 0x08, 0xa5, 0xcd, 0xf8, 0x09, 0x27, 0xca, 0xf0, - 0x2a, 0xd6, 0x04, 0x42, 0x30, 0xb3, 0x1d, 0x7e, 0x47, 0xb4, 0x99, 0x58, 0xad, 0x91, 0x03, 0xe5, - 0x47, 0x3e, 0x23, 0xb1, 0xb0, 0x4b, 0x12, 0xb7, 0x59, 0xb0, 0x2d, 0x6c, 0x76, 0x50, 0x13, 0x6a, - 0x1b, 0x8c, 0xf8, 0x82, 0x74, 0xef, 0x0a, 0xbb, 0xbc, 0x6c, 0xad, 0xcc, 0xae, 0x39, 0x9e, 0x4e, - 0x0a, 0x2f, 0x4b, 0x0a, 0xef, 0x71, 0x96, 0x14, 0xcd, 0xea, 0xcb, 0xfd, 0xa5, 0x73, 0x3f, 0xfc, - 0x21, 0x7d, 0x37, 0x14, 0x43, 0x77, 0x00, 0x1e, 0xfa, 0x5c, 0x3c, 0xe1, 0x0a, 0xa4, 0x72, 0x2c, - 0xc8, 0x8c, 
0x02, 0xc8, 0xc9, 0xa0, 0x45, 0x00, 0xe5, 0x84, 0x0d, 0x9a, 0xc6, 0xc2, 0xae, 0x2a, - 0xdd, 0x73, 0x3b, 0x68, 0x19, 0x66, 0x5b, 0x84, 0x77, 0x58, 0x98, 0xa8, 0x50, 0xd7, 0x94, 0x7b, - 0xf2, 0x5b, 0x12, 0x41, 0x7b, 0xf0, 0xf1, 0x20, 0x21, 0x36, 0x28, 0x86, 0xdc, 0x8e, 0x8c, 0xe5, - 0xf6, 0x8e, 0xcf, 0x48, 0xd7, 0x9e, 0x55, 0xee, 0x32, 0x94, 0xf4, 0xaf, 0xf6, 0x04, 0xb7, 0xeb, - 0x2a, 0xc8, 0x19, 0xe9, 0xfe, 0x54, 0x86, 0xfa, 0xb6, 0xcc, 0xf1, 0x2c, 0x1d, 0x16, 0xa0, 0x88, - 0x49, 0xcf, 0xc4, 0x46, 0x2e, 0x91, 0x07, 0xd0, 0x22, 0xbd, 0x30, 0x0e, 0x95, 0x56, 0x05, 0x65, - 0xf8, 0xbc, 0x97, 0xb4, 0xbd, 0x83, 0x5d, 0x9c, 0xe3, 0x40, 0x0e, 0x54, 0xef, 0xbd, 0x48, 0x28, - 0x93, 0x29, 0x55, 0x54, 0x30, 0x43, 0x1a, 0x3d, 0x85, 0xb9, 0x6c, 0x7d, 0x57, 0x08, 0x26, 0x13, - 0x55, 0xa6, 0xd1, 0xcd, 0xf1, 0x34, 0xca, 0x2b, 0xe5, 0x8d, 0xc8, 0xdc, 0x8b, 0x05, 0x1b, 0xe0, - 0x51, 0x1c, 0x69, 0xe1, 0x36, 0xe1, 0x5c, 0x6a, 0xa8, 0xc2, 0x8f, 0x33, 0x52, 0xaa, 0xf3, 0x39, - 0xa3, 0xb1, 0x20, 0x71, 0x57, 0x85, 0xbe, 0x86, 0x87, 0xb4, 0x54, 0x27, 0x5b, 0x6b, 0x75, 0x2a, - 0x27, 0x52, 0x67, 0x44, 0xc6, 0xa8, 0x33, 0xb2, 0x87, 0xd6, 0xa1, 0xb4, 0xe1, 0x77, 0x76, 0x88, - 0x8a, 0xf2, 0xec, 0xda, 0xe2, 0x38, 0xa0, 0x3a, 0xfe, 0x52, 0x85, 0x95, 0xab, 0x42, 0x3d, 0x87, - 0xb5, 0x08, 0xfa, 0x06, 0xea, 0xf7, 0x62, 0x11, 0x8a, 0x88, 0xf4, 0x55, 0xc4, 0x6a, 0x32, 0x62, - 0xcd, 0xf5, 0x57, 0xfb, 0x4b, 0x1f, 0x4c, 0xbd, 0x78, 0x52, 0x11, 0x46, 0x0d, 0x92, 0x93, 0xf2, - 0x72, 0x10, 0x78, 0x04, 0x0f, 0x3d, 0x83, 0xf9, 0x4c, 0xd9, 0xcd, 0x38, 0x49, 0x05, 0xb7, 0x41, - 0x59, 0xbd, 0x76, 0x42, 0xab, 0xb5, 0x90, 0x36, 0xfb, 0x10, 0x92, 0x73, 0x07, 0xd0, 0x78, 0xac, - 0x64, 0x4e, 0xed, 0x92, 0x41, 0x96, 0x53, 0xbb, 0x64, 0x20, 0xcb, 0x7a, 0xcf, 0x8f, 0x52, 0x5d, - 0xee, 0x35, 0xac, 0x89, 0xf5, 0xc2, 0x6d, 0x4b, 0x22, 0x8c, 0xbb, 0xf7, 0x54, 0x08, 0x5f, 0xc1, - 0x85, 0x09, 0xaa, 0x4e, 0x80, 0xb8, 0x9a, 0x87, 0x18, 0xcf, 0xe9, 0x03, 0x48, 0xf7, 0x97, 0x22, - 0xd4, 0xf3, 0x01, 0x43, 0xab, 0x70, 0x41, 0xdb, 0x89, 0x49, 0xaf, 0x45, 0x12, 0x46, 0x3a, 0xf2, - 0x96, 0x30, 0xe0, 0x93, 0x8e, 0xd0, 0x1a, 0x5c, 0xdc, 0xec, 0x9b, 0x6d, 0x9e, 0x13, 0x29, 0xa8, - 0x7a, 0x9c, 0x78, 0x86, 0x28, 0xbc, 0xa5, 0xa1, 0x94, 0x27, 0x72, 0x42, 0x45, 0x15, 0xb0, 0x8f, - 0x8e, 0xce, 0x2a, 0x6f, 0xa2, 0xac, 0x8e, 0xdb, 0x64, 0x5c, 0xf4, 0x29, 0x54, 0xf4, 0x41, 0x56, - 0x98, 0x57, 0x8e, 0xfe, 0x84, 0x06, 0xcb, 0x64, 0xa4, 0xb8, 0xb6, 0x83, 0xdb, 0xa5, 0x53, 0x88, - 0x1b, 0x19, 0xe7, 0x3e, 0x38, 0xd3, 0x55, 0x3e, 0x4d, 0x0a, 0xb8, 0x3f, 0x5b, 0x70, 0x7e, 0xec, - 0x43, 0xf2, 0xd5, 0x50, 0xf7, 0xa6, 0x86, 0x50, 0x6b, 0xd4, 0x82, 0x92, 0xae, 0xfc, 0x82, 0x52, - 0xd8, 0x3b, 0x81, 0xc2, 0x5e, 0xae, 0xec, 0xb5, 0xb0, 0x73, 0x1b, 0xe0, 0x6c, 0xc9, 0xea, 0xfe, - 0x6a, 0xc1, 0x9c, 0xa9, 0x32, 0xf3, 0xc4, 0xfa, 0xb0, 0x90, 0x95, 0x50, 0xb6, 0x67, 0x1e, 0xdb, - 0x5b, 0x53, 0x0b, 0x54, 0xb3, 0x79, 0x87, 0xe5, 0xb4, 0x8e, 0x63, 0x70, 0xce, 0x46, 0x96, 0x57, - 0x87, 0x58, 0x4f, 0xa5, 0xf9, 0x65, 0x98, 0xdb, 0x16, 0xbe, 0x48, 0xf9, 0xd4, 0x97, 0xc3, 0xfd, - 0xc7, 0x82, 0xf9, 0x8c, 0xc7, 0x58, 0xf7, 0x3e, 0x54, 0xf7, 0x08, 0x13, 0xe4, 0x05, 0xe1, 0xc6, - 0x2a, 0x7b, 0xdc, 0xaa, 0xaf, 0x15, 0x07, 0x1e, 0x72, 0xa2, 0x75, 0xa8, 0x72, 0x85, 0x43, 0xb2, - 0x40, 0x2d, 0x4e, 0x93, 0x32, 0xdf, 0x1b, 0xf2, 0xa3, 0x06, 0xcc, 0x44, 0x34, 0xe0, 0xa6, 0x66, - 0xde, 0x9e, 0x26, 0xf7, 0x90, 0x06, 0x58, 0x31, 0xa2, 0x8f, 0xa1, 0xfa, 0xdc, 0x67, 0x71, 0x18, - 0x07, 0x59, 0x15, 0x2c, 0x4d, 0x13, 0x7a, 0xaa, 0xf9, 0xf0, 0x50, 0x40, 0x76, 0x3a, 0x65, 0x7d, - 0x86, 0x1e, 0x40, 0xb9, 0x1b, 0x06, 
0x84, 0x0b, 0xed, 0x92, 0xe6, 0x9a, 0xbc, 0xe4, 0x5f, 0xed, - 0x2f, 0x5d, 0xcb, 0xdd, 0xe2, 0x34, 0x21, 0xb1, 0x6c, 0x76, 0xfd, 0x30, 0x26, 0x8c, 0x37, 0x02, - 0x7a, 0x43, 0x8b, 0x78, 0x2d, 0xf5, 0x83, 0x0d, 0x82, 0xc4, 0x0a, 0xf5, 0x5d, 0xad, 0xee, 0x8b, - 0xb3, 0x61, 0x69, 0x04, 0x59, 0x06, 0xb1, 0xdf, 0x27, 0xe6, 0x6d, 0x56, 0x6b, 0xd9, 0x38, 0x74, - 0x64, 0x9e, 0x77, 0x55, 0x4b, 0x55, 0xc5, 0x86, 0x42, 0xeb, 0x50, 0xe1, 0xc2, 0x67, 0xf2, 0xce, - 0x29, 0x9d, 0xb0, 0xe3, 0xc9, 0x04, 0xd0, 0x67, 0x50, 0xeb, 0xd0, 0x7e, 0x12, 0x11, 0x29, 0x5d, - 0x3e, 0xa1, 0xf4, 0x81, 0x88, 0x4c, 0x3d, 0xc2, 0x18, 0x65, 0xaa, 0xd7, 0xaa, 0x61, 0x4d, 0xa0, - 0x0f, 0x61, 0x2e, 0x61, 0x34, 0x60, 0x84, 0xf3, 0x2f, 0x18, 0x4d, 0x13, 0xf3, 0xc2, 0x9e, 0x97, - 0x97, 0xf7, 0xa3, 0xfc, 0x01, 0x1e, 0xe5, 0x73, 0xff, 0x2e, 0x40, 0x3d, 0x9f, 0x22, 0x63, 0x4d, - 0xe8, 0x03, 0x28, 0xeb, 0x84, 0xd3, 0xb9, 0x7e, 0x36, 0x1f, 0x6b, 0x84, 0x89, 0x3e, 0xb6, 0xa1, - 0xd2, 0x49, 0x99, 0xea, 0x50, 0x75, 0xdf, 0x9a, 0x91, 0xd2, 0x52, 0x41, 0x85, 0x1f, 0x29, 0x1f, - 0x17, 0xb1, 0x26, 0x64, 0xd3, 0x3a, 0x9c, 0x53, 0x4e, 0xd7, 0xb4, 0x0e, 0xc5, 0xf2, 0xf1, 0xab, - 0xbc, 0x51, 0xfc, 0xaa, 0xa7, 0x8e, 0x9f, 0xfb, 0x9b, 0x05, 0xb5, 0x61, 0x6d, 0xe5, 0xbc, 0x6b, - 0xbd, 0xb1, 0x77, 0x47, 0x3c, 0x53, 0x38, 0x9b, 0x67, 0x2e, 0x41, 0x99, 0x0b, 0x46, 0xfc, 0xbe, - 0x1e, 0xa9, 0xb0, 0xa1, 0xe4, 0x2d, 0xd6, 0xe7, 0x81, 0x8a, 0x50, 0x1d, 0xcb, 0xa5, 0xfb, 0xaf, - 0x05, 0x73, 0x23, 0xe5, 0xfe, 0xbf, 0xda, 0x72, 0x11, 0x4a, 0x11, 0xd9, 0x23, 0x7a, 0xe8, 0x2b, - 0x62, 0x4d, 0xc8, 0x5d, 0xbe, 0x43, 0x99, 0x50, 0xca, 0xd5, 0xb1, 0x26, 0xa4, 0xce, 0x5d, 0x22, - 0xfc, 0x30, 0x52, 0xf7, 0x52, 0x1d, 0x1b, 0x4a, 0xea, 0x9c, 0xb2, 0xc8, 0x34, 0xbe, 0x72, 0x89, - 0x5c, 0x98, 0x09, 0xe3, 0x1e, 0x35, 0x69, 0xa3, 0x3a, 0x9b, 0x6d, 0x9a, 0xb2, 0x0e, 0xd9, 0x8c, - 0x7b, 0x14, 0xab, 0x33, 0x74, 0x19, 0xca, 0xcc, 0x8f, 0x03, 0x92, 0x75, 0xbd, 0x35, 0xc9, 0x85, - 0xe5, 0x0e, 0x36, 0x07, 0xae, 0x0b, 0x75, 0x35, 0x38, 0x6e, 0x11, 0x2e, 0xc7, 0x14, 0x99, 0xd6, - 0x5d, 0x5f, 0xf8, 0xca, 0xec, 0x3a, 0x56, 0x6b, 0xf7, 0x3a, 0xa0, 0x87, 0x21, 0x17, 0x4f, 0xd5, - 0xc0, 0xcb, 0x8f, 0x9b, 0x2a, 0xb7, 0xe1, 0xc2, 0x08, 0xb7, 0x79, 0x16, 0x3e, 0x39, 0x34, 0x57, - 0x5e, 0x1d, 0xbf, 0x71, 0xd5, 0x5c, 0xed, 0x69, 0xc1, 0xd1, 0xf1, 0x72, 0xed, 0xaf, 0x22, 0x54, - 0x36, 0xf4, 0x5f, 0x06, 0xe8, 0x31, 0xd4, 0x86, 0x63, 0x2b, 0x72, 0xc7, 0x61, 0x0e, 0xcf, 0xbf, - 0xce, 0x95, 0x23, 0x79, 0x8c, 0x7e, 0xf7, 0xa1, 0xa4, 0x06, 0x78, 0x34, 0xe1, 0xdd, 0xc9, 0x4f, - 0xf6, 0xce, 0xd1, 0x03, 0xf1, 0xaa, 0x25, 0x91, 0xd4, 0xa3, 0x3d, 0x09, 0x29, 0xdf, 0x6e, 0x3b, - 0x4b, 0xc7, 0xbc, 0xf6, 0x68, 0x0b, 0xca, 0xe6, 0x26, 0x9b, 0xc4, 0x9a, 0x7f, 0x9a, 0x9d, 0xe5, - 0xe9, 0x0c, 0x1a, 0x6c, 0xd5, 0x42, 0x5b, 0xc3, 0x09, 0x6a, 0x92, 0x6a, 0xf9, 0x34, 0x70, 0x8e, - 0x39, 0x5f, 0xb1, 0x56, 0x2d, 0xf4, 0x0c, 0x66, 0x73, 0x81, 0x46, 0x13, 0x02, 0x3a, 0x9e, 0x35, - 0xce, 0xbb, 0xc7, 0x70, 0x69, 0x65, 0x9b, 0xf5, 0x97, 0xaf, 0x17, 0xad, 0xdf, 0x5f, 0x2f, 0x5a, - 0x7f, 0xbe, 0x5e, 0xb4, 0xda, 0x65, 0x55, 0xf2, 0xef, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x54, - 0x8e, 0x72, 0x11, 0x36, 0x12, 0x00, 0x00, + // 2261 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcd, 0x6e, 0x1b, 0xc9, + 0x11, 0xde, 0x21, 0x25, 0xfe, 0x14, 0x29, 0x59, 0x6a, 0x7b, 0x8d, 0xc9, 0xc4, 0x2b, 0xc9, 0xb3, + 0x76, 0x22, 0x38, 0xf6, 0x50, 0xcb, 0xac, 0x63, 0xaf, 0x9c, 0x38, 0x16, 0x45, 0x66, 0x2d, 0xc7, + 0x82, 0xb5, 0x2d, 0x79, 0x0d, 0x2c, 0xe0, 0x04, 0x23, 0xb2, 0x45, 
0x0f, 0x34, 0x9c, 0x99, 0x74, + 0x37, 0xb5, 0xe6, 0x3e, 0x40, 0x80, 0xcd, 0x21, 0xc8, 0x25, 0xc8, 0x25, 0xf7, 0x9c, 0x72, 0xce, + 0x13, 0x04, 0xf0, 0x31, 0xe7, 0x3d, 0x38, 0x81, 0x1f, 0x20, 0xc8, 0x31, 0xb9, 0x05, 0xfd, 0x33, + 0xe4, 0x90, 0x33, 0x94, 0x28, 0xdb, 0x27, 0x76, 0x75, 0xd7, 0x57, 0x53, 0x55, 0x5d, 0x5d, 0x5d, + 0xd5, 0x84, 0x85, 0x76, 0x18, 0x70, 0x1a, 0xfa, 0x4e, 0x44, 0x43, 0x1e, 0xa2, 0xa5, 0x5e, 0x78, + 0x38, 0x70, 0x0e, 0xfb, 0x9e, 0xdf, 0x39, 0xf6, 0xb8, 0x73, 0xf2, 0x89, 0x75, 0xab, 0xeb, 0xf1, + 0x17, 0xfd, 0x43, 0xa7, 0x1d, 0xf6, 0x6a, 0xdd, 0xb0, 0x1b, 0xd6, 0x24, 0xe3, 0x61, 0xff, 0x48, + 0x52, 0x92, 0x90, 0x23, 0x25, 0xc0, 0x5a, 0xed, 0x86, 0x61, 0xd7, 0x27, 0x23, 0x2e, 0xee, 0xf5, + 0x08, 0xe3, 0x6e, 0x2f, 0xd2, 0x0c, 0x37, 0x13, 0xf2, 0xc4, 0xc7, 0x6a, 0xf1, 0xc7, 0x6a, 0x2c, + 0xf4, 0x4f, 0x08, 0xad, 0x45, 0x87, 0xb5, 0x30, 0x62, 0x9a, 0xbb, 0x36, 0x95, 0xdb, 0x8d, 0xbc, + 0x1a, 0x1f, 0x44, 0x84, 0xd5, 0xbe, 0x0e, 0xe9, 0x31, 0xa1, 0x1a, 0x50, 0x9f, 0x54, 0x57, 0xe9, + 0xe3, 0x46, 0x1e, 0xd3, 0xc3, 0x1a, 0x8d, 0xda, 0x35, 0xc6, 0x5d, 0xde, 0x8f, 0x3f, 0x72, 0xfb, + 0x14, 0x95, 0xfa, 0xb4, 0x4d, 0xa2, 0xd0, 0xf7, 0xda, 0x03, 0xa1, 0x98, 0x1a, 0x29, 0x98, 0xfd, + 0x5b, 0x03, 0xaa, 0x7b, 0xb4, 0x1f, 0x10, 0x4c, 0x7e, 0xd3, 0x27, 0x8c, 0xa3, 0xcb, 0x50, 0x38, + 0xf2, 0x7c, 0x4e, 0xa8, 0x69, 0xac, 0xe5, 0xd7, 0xcb, 0x58, 0x53, 0x68, 0x09, 0xf2, 0xae, 0xef, + 0x9b, 0xb9, 0x35, 0x63, 0xbd, 0x84, 0xc5, 0x10, 0xad, 0x43, 0xf5, 0x98, 0x90, 0xa8, 0xd9, 0xa7, + 0x2e, 0xf7, 0xc2, 0xc0, 0xcc, 0xaf, 0x19, 0xeb, 0xf9, 0xc6, 0xdc, 0xab, 0xd7, 0xab, 0x06, 0x1e, + 0x5b, 0x41, 0x36, 0x94, 0x05, 0xdd, 0x18, 0x70, 0xc2, 0xcc, 0xb9, 0x04, 0xdb, 0x68, 0xda, 0xbe, + 0x01, 0x4b, 0x4d, 0x8f, 0x1d, 0x3f, 0x65, 0x6e, 0xf7, 0x2c, 0x5d, 0xec, 0x47, 0xb0, 0x9c, 0xe0, + 0x65, 0x51, 0x18, 0x30, 0x82, 0x6e, 0x43, 0x81, 0x92, 0x76, 0x48, 0x3b, 0x92, 0xb9, 0x52, 0xff, + 0xc8, 0x99, 0x0c, 0x03, 0x47, 0x03, 0x04, 0x13, 0xd6, 0xcc, 0xf6, 0x9f, 0xf2, 0x50, 0x49, 0xcc, + 0xa3, 0x45, 0xc8, 0xed, 0x34, 0x4d, 0x63, 0xcd, 0x58, 0x2f, 0xe3, 0xdc, 0x4e, 0x13, 0x99, 0x50, + 0xdc, 0xed, 0x73, 0xf7, 0xd0, 0x27, 0xda, 0xf6, 0x98, 0x44, 0x97, 0x60, 0x7e, 0x27, 0x78, 0xca, + 0x88, 0x34, 0xbc, 0x84, 0x15, 0x81, 0x10, 0xcc, 0xed, 0x7b, 0xdf, 0x10, 0x65, 0x26, 0x96, 0x63, + 0x64, 0x41, 0x61, 0xcf, 0xa5, 0x24, 0xe0, 0xe6, 0xbc, 0x90, 0xdb, 0xc8, 0x99, 0x06, 0xd6, 0x33, + 0xa8, 0x01, 0xe5, 0x6d, 0x4a, 0x5c, 0x4e, 0x3a, 0x5b, 0xdc, 0x2c, 0xac, 0x19, 0xeb, 0x95, 0xba, + 0xe5, 0xa8, 0x4d, 0x76, 0xe2, 0xf8, 0x73, 0x0e, 0xe2, 0xf8, 0x6b, 0x94, 0x5e, 0xbd, 0x5e, 0xfd, + 0xe0, 0x0f, 0xff, 0x14, 0xbe, 0x1b, 0xc2, 0xd0, 0x03, 0x80, 0xc7, 0x2e, 0xe3, 0x4f, 0x99, 0x14, + 0x52, 0x3c, 0x53, 0xc8, 0x9c, 0x14, 0x90, 0xc0, 0xa0, 0x15, 0x00, 0xe9, 0x84, 0xed, 0xb0, 0x1f, + 0x70, 0xb3, 0x24, 0x75, 0x4f, 0xcc, 0xa0, 0x35, 0xa8, 0x34, 0x09, 0x6b, 0x53, 0x2f, 0x92, 0x5b, + 0x5d, 0x96, 0xee, 0x49, 0x4e, 0x09, 0x09, 0xca, 0x83, 0x07, 0x83, 0x88, 0x98, 0x20, 0x19, 0x12, + 0x33, 0x62, 0x2f, 0xf7, 0x5f, 0xb8, 0x94, 0x74, 0xcc, 0x8a, 0x74, 0x97, 0xa6, 0x84, 0x7f, 0x95, + 0x27, 0x98, 0x59, 0x95, 0x9b, 0x1c, 0x93, 0xf6, 0xef, 0x8a, 0x50, 0xdd, 0x17, 0xc7, 0x29, 0x0e, + 0x87, 0x25, 0xc8, 0x63, 0x72, 0xa4, 0xf7, 0x46, 0x0c, 0x91, 0x03, 0xd0, 0x24, 0x47, 0x5e, 0xe0, + 0x49, 0xad, 0x72, 0xd2, 0xf0, 0x45, 0x27, 0x3a, 0x74, 0x46, 0xb3, 0x38, 0xc1, 0x81, 0x2c, 0x28, + 0xb5, 0x5e, 0x46, 0x21, 0x15, 0x21, 0x95, 0x97, 0x62, 0x86, 0x34, 0x7a, 0x06, 0x0b, 0xf1, 0x78, + 0x8b, 0x73, 0x2a, 0x02, 0x55, 0x84, 0xd1, 0x27, 0xe9, 0x30, 0x4a, 0x2a, 0xe5, 0x8c, 0x61, 
0x5a, + 0x01, 0xa7, 0x03, 0x3c, 0x2e, 0x47, 0x58, 0xb8, 0x4f, 0x18, 0x13, 0x1a, 0xca, 0xed, 0xc7, 0x31, + 0x29, 0xd4, 0xf9, 0x05, 0x0d, 0x03, 0x4e, 0x82, 0x8e, 0xdc, 0xfa, 0x32, 0x1e, 0xd2, 0x42, 0x9d, + 0x78, 0xac, 0xd4, 0x29, 0xce, 0xa4, 0xce, 0x18, 0x46, 0xab, 0x33, 0x36, 0x87, 0x36, 0x61, 0x7e, + 0xdb, 0x6d, 0xbf, 0x20, 0x72, 0x97, 0x2b, 0xf5, 0x95, 0xb4, 0x40, 0xb9, 0xfc, 0x44, 0x6e, 0x2b, + 0x93, 0x07, 0xf5, 0x03, 0xac, 0x20, 0xe8, 0x57, 0x50, 0x6d, 0x05, 0xdc, 0xe3, 0x3e, 0xe9, 0xc9, + 0x1d, 0x2b, 0x8b, 0x1d, 0x6b, 0x6c, 0x7e, 0xf7, 0x7a, 0xf5, 0x27, 0x53, 0xd3, 0x4f, 0x9f, 0x7b, + 0x7e, 0x8d, 0x24, 0x50, 0x4e, 0x42, 0x04, 0x1e, 0x93, 0x87, 0xbe, 0x82, 0xc5, 0x58, 0xd9, 0x9d, + 0x20, 0xea, 0x73, 0x66, 0x82, 0xb4, 0xba, 0x3e, 0xa3, 0xd5, 0x0a, 0xa4, 0xcc, 0x9e, 0x90, 0x24, + 0x9c, 0xbd, 0x13, 0x70, 0x42, 0x03, 0xd7, 0xd7, 0x21, 0x38, 0xa4, 0xd1, 0x8e, 0x88, 0x34, 0x91, + 0x25, 0xf7, 0x64, 0x6e, 0x34, 0xab, 0xd2, 0x35, 0xd7, 0xd3, 0x5f, 0x4d, 0xe6, 0x52, 0x47, 0x31, + 0xe3, 0x31, 0xa8, 0xf5, 0x00, 0x50, 0x3a, 0x24, 0x44, 0xe8, 0x1e, 0x93, 0x41, 0x1c, 0xba, 0xc7, + 0x64, 0x20, 0xb2, 0xc7, 0x89, 0xeb, 0xf7, 0x55, 0x56, 0x29, 0x63, 0x45, 0x6c, 0xe6, 0xee, 0x1a, + 0x42, 0x42, 0x7a, 0x17, 0xcf, 0x25, 0xe1, 0x0b, 0xb8, 0x98, 0xe1, 0x91, 0x0c, 0x11, 0xd7, 0x92, + 0x22, 0xd2, 0x47, 0x67, 0x24, 0xd2, 0xfe, 0x6b, 0x1e, 0xaa, 0xc9, 0xb8, 0x40, 0x1b, 0x70, 0x51, + 0xd9, 0x89, 0xc9, 0x51, 0x93, 0x44, 0x94, 0xb4, 0x45, 0x32, 0xd2, 0xc2, 0xb3, 0x96, 0x50, 0x1d, + 0x2e, 0xed, 0xf4, 0xf4, 0x34, 0x4b, 0x40, 0x72, 0xf2, 0xd8, 0x67, 0xae, 0xa1, 0x10, 0x3e, 0x54, + 0xa2, 0xa4, 0x27, 0x12, 0xa0, 0xbc, 0x8c, 0x8b, 0xcf, 0x4e, 0x0f, 0x5e, 0x27, 0x13, 0xab, 0xc2, + 0x23, 0x5b, 0x2e, 0xfa, 0x19, 0x14, 0xd5, 0x42, 0x7c, 0xfe, 0x3f, 0x3e, 0xfd, 0x13, 0x4a, 0x58, + 0x8c, 0x11, 0x70, 0x65, 0x07, 0x33, 0xe7, 0xcf, 0x01, 0xd7, 0x18, 0xeb, 0x21, 0x58, 0xd3, 0x55, + 0x3e, 0x4f, 0x08, 0xd8, 0x7f, 0x31, 0x60, 0x39, 0xf5, 0x21, 0x71, 0x39, 0xc9, 0xf4, 0xac, 0x44, + 0xc8, 0x31, 0x6a, 0xc2, 0xbc, 0x4a, 0x30, 0x39, 0xa9, 0xb0, 0x33, 0x83, 0xc2, 0x4e, 0x22, 0xbb, + 0x28, 0xb0, 0x75, 0x17, 0xe0, 0xed, 0x82, 0xd5, 0xfe, 0x9b, 0x01, 0x0b, 0xfa, 0x30, 0xeb, 0x9b, + 0xdc, 0x85, 0xa5, 0xf8, 0x08, 0xc5, 0x73, 0xfa, 0x4e, 0xbf, 0x3d, 0x35, 0x0f, 0x28, 0x36, 0x67, + 0x12, 0xa7, 0x74, 0x4c, 0x89, 0xb3, 0xb6, 0xe3, 0xb8, 0x9a, 0x60, 0x3d, 0x97, 0xe6, 0x57, 0x61, + 0x61, 0x5f, 0x96, 0x60, 0x53, 0x2f, 0x28, 0xfb, 0x3f, 0x06, 0x2c, 0xc6, 0x3c, 0xda, 0xba, 0x4f, + 0xa1, 0x74, 0x42, 0x28, 0x27, 0x2f, 0x09, 0xd3, 0x56, 0x99, 0x69, 0xab, 0xbe, 0x94, 0x1c, 0x78, + 0xc8, 0x89, 0x36, 0xa1, 0xa4, 0xca, 0x3d, 0x12, 0x6f, 0xd4, 0xca, 0x34, 0x94, 0xfe, 0xde, 0x90, + 0x1f, 0xd5, 0x60, 0xce, 0x0f, 0xbb, 0x4c, 0x9f, 0x99, 0xef, 0x4f, 0xc3, 0x3d, 0x0e, 0xbb, 0x58, + 0x32, 0xa2, 0x7b, 0x50, 0xfa, 0xda, 0xa5, 0x81, 0x17, 0x74, 0xe3, 0x53, 0xb0, 0x3a, 0x0d, 0xf4, + 0x4c, 0xf1, 0xe1, 0x21, 0x40, 0x14, 0x54, 0x05, 0xb5, 0x86, 0x1e, 0x41, 0xa1, 0xe3, 0x75, 0x09, + 0xe3, 0xca, 0x25, 0x8d, 0xba, 0xb8, 0x4b, 0xbe, 0x7b, 0xbd, 0x7a, 0x23, 0x71, 0x59, 0x84, 0x11, + 0x09, 0x44, 0xf9, 0xee, 0x7a, 0x01, 0xa1, 0xa2, 0xbc, 0xbd, 0xa5, 0x20, 0x4e, 0x53, 0xfe, 0x60, + 0x2d, 0x41, 0xc8, 0xf2, 0xd4, 0x95, 0x20, 0xf3, 0xc5, 0xdb, 0xc9, 0x52, 0x12, 0xc4, 0x31, 0x08, + 0xdc, 0x1e, 0xd1, 0x25, 0x80, 0x1c, 0x8b, 0xfa, 0xa4, 0x2d, 0xe2, 0xbc, 0x23, 0x2b, 0xb7, 0x12, + 0xd6, 0x14, 0xda, 0x84, 0x22, 0xe3, 0x2e, 0x15, 0x39, 0x67, 0x7e, 0xc6, 0xc2, 0x2a, 0x06, 0xa0, + 0xfb, 0x50, 0x6e, 0x87, 0xbd, 0xc8, 0x27, 0x02, 0x5d, 0x98, 0x11, 0x3d, 0x82, 0x88, 0xd0, 0x23, + 0x94, 0x86, 0x54, 
0x96, 0x74, 0x65, 0xac, 0x08, 0x74, 0x07, 0x16, 0x22, 0x1a, 0x76, 0x29, 0x61, + 0xec, 0x73, 0x1a, 0xf6, 0x23, 0x7d, 0x91, 0x2f, 0x8b, 0xe4, 0xbd, 0x97, 0x5c, 0xc0, 0xe3, 0x7c, + 0xf6, 0xbf, 0x73, 0x50, 0x4d, 0x86, 0x48, 0xaa, 0xd6, 0x7d, 0x04, 0x05, 0x15, 0x70, 0x2a, 0xd6, + 0xdf, 0xce, 0xc7, 0x4a, 0x42, 0xa6, 0x8f, 0x4d, 0x28, 0xb6, 0xfb, 0x54, 0x16, 0xc2, 0xaa, 0x3c, + 0x8e, 0x49, 0x61, 0x29, 0x0f, 0xb9, 0xeb, 0x4b, 0x1f, 0xe7, 0xb1, 0x22, 0x44, 0x6d, 0x3c, 0xec, + 0xbc, 0xce, 0x57, 0x1b, 0x0f, 0x61, 0xc9, 0xfd, 0x2b, 0xbe, 0xd3, 0xfe, 0x95, 0xce, 0xbd, 0x7f, + 0xf6, 0xdf, 0x0d, 0x28, 0x0f, 0xcf, 0x56, 0xc2, 0xbb, 0xc6, 0x3b, 0x7b, 0x77, 0xcc, 0x33, 0xb9, + 0xb7, 0xf3, 0xcc, 0x65, 0x28, 0x30, 0x4e, 0x89, 0xdb, 0x53, 0x9d, 0x1b, 0xd6, 0x94, 0xc8, 0x62, + 0x3d, 0xd6, 0x95, 0x3b, 0x54, 0xc5, 0x62, 0x68, 0xff, 0xd7, 0x80, 0x85, 0xb1, 0xe3, 0xfe, 0x5e, + 0x6d, 0xb9, 0x04, 0xf3, 0x3e, 0x39, 0x21, 0xaa, 0xb7, 0xcc, 0x63, 0x45, 0x88, 0x59, 0xf6, 0x22, + 0xa4, 0x5c, 0x2a, 0x57, 0xc5, 0x8a, 0x10, 0x3a, 0x77, 0x08, 0x77, 0x3d, 0x5f, 0xe6, 0xa5, 0x2a, + 0xd6, 0x94, 0xd0, 0xb9, 0x4f, 0x7d, 0x5d, 0x5f, 0x8b, 0x21, 0xb2, 0x61, 0xce, 0x0b, 0x8e, 0x42, + 0x1d, 0x36, 0xb2, 0xb2, 0x51, 0x75, 0xda, 0x4e, 0x70, 0x14, 0x62, 0xb9, 0x86, 0xae, 0x42, 0x81, + 0xba, 0x41, 0x97, 0xc4, 0xc5, 0x75, 0x59, 0x70, 0x61, 0x31, 0x83, 0xf5, 0x82, 0x6d, 0x43, 0x55, + 0xf6, 0xa7, 0xbb, 0x84, 0x89, 0x6e, 0x48, 0x84, 0x75, 0xc7, 0xe5, 0xae, 0x34, 0xbb, 0x8a, 0xe5, + 0xd8, 0xbe, 0x09, 0xe8, 0xb1, 0xc7, 0xf8, 0x33, 0xd9, 0xc2, 0xb3, 0xb3, 0x9a, 0xd7, 0x7d, 0xb8, + 0x38, 0xc6, 0xad, 0xaf, 0x85, 0x9f, 0x4e, 0xb4, 0xaf, 0xd7, 0xd2, 0x19, 0x57, 0xbe, 0x14, 0x38, + 0x0a, 0x38, 0xd1, 0xc5, 0x2e, 0x40, 0x45, 0xda, 0xa5, 0xbe, 0x6d, 0xbb, 0x50, 0x55, 0xa4, 0x16, + 0xfe, 0x05, 0x5c, 0x88, 0x05, 0x7d, 0x49, 0xa8, 0x6c, 0x45, 0x0c, 0xe9, 0x97, 0x1f, 0x4e, 0xfb, + 0x4a, 0x63, 0x9c, 0x1d, 0x4f, 0xe2, 0x6d, 0x02, 0x17, 0x25, 0xcf, 0x43, 0x8f, 0xf1, 0x90, 0x0e, + 0x62, 0xab, 0x57, 0x00, 0xb6, 0xda, 0xdc, 0x3b, 0x21, 0x4f, 0x02, 0x5f, 0x5d, 0xa3, 0x25, 0x9c, + 0x98, 0x89, 0xaf, 0xc8, 0xdc, 0xa8, 0x87, 0xbb, 0x02, 0xe5, 0x96, 0x4b, 0xfd, 0x41, 0xeb, 0xa5, + 0xc7, 0x75, 0x2b, 0x3d, 0x9a, 0xb0, 0x7f, 0x6f, 0xc0, 0x72, 0xf2, 0x3b, 0xad, 0x13, 0x91, 0x2e, + 0xee, 0xc1, 0x1c, 0x8f, 0xeb, 0x98, 0xc5, 0x2c, 0x23, 0x52, 0x10, 0x51, 0xea, 0x60, 0x09, 0x4a, + 0x78, 0x5a, 0x1d, 0x9c, 0x6b, 0xa7, 0xc3, 0x27, 0x3c, 0xfd, 0xbf, 0x12, 0xa0, 0xf4, 0x72, 0x46, + 0x6f, 0x9a, 0x6c, 0xee, 0x72, 0x13, 0xcd, 0xdd, 0xf3, 0xc9, 0xe6, 0x4e, 0x5d, 0xcd, 0x77, 0x66, + 0xd1, 0x64, 0x86, 0x16, 0xef, 0x2e, 0x94, 0xe3, 0xea, 0x26, 0xbe, 0xc0, 0xad, 0xb4, 0xe8, 0x61, + 0x01, 0x34, 0x62, 0x46, 0xeb, 0xf1, 0x8d, 0xa3, 0xee, 0x3a, 0x14, 0xe7, 0x14, 0x1a, 0xb5, 0x1d, + 0x5d, 0x57, 0xe8, 0x5b, 0xe8, 0xfe, 0xf9, 0xde, 0x2d, 0xe6, 0x26, 0xdf, 0x2c, 0x1a, 0x50, 0xd9, + 0x8e, 0x13, 0xe5, 0x39, 0x1e, 0x2d, 0x92, 0x20, 0xb4, 0xa1, 0x0b, 0x1b, 0x95, 0x9a, 0xaf, 0xa4, + 0x4d, 0x8c, 0x1f, 0x28, 0x42, 0xaa, 0x2b, 0x9b, 0xa3, 0x8c, 0xd2, 0xb2, 0x2c, 0x1d, 0xb4, 0x39, + 0x93, 0xef, 0x67, 0xac, 0x2f, 0xd1, 0x67, 0x50, 0xc0, 0x84, 0xf5, 0x7d, 0x2e, 0x5f, 0x42, 0x2a, + 0xf5, 0xab, 0x53, 0xa4, 0x2b, 0x26, 0x79, 0x56, 0x35, 0x00, 0xfd, 0x12, 0x8a, 0x6a, 0xc4, 0xcc, + 0xca, 0xb4, 0x96, 0x3f, 0x43, 0x33, 0x8d, 0xd1, 0x0d, 0x85, 0xa6, 0xc4, 0x71, 0xfc, 0x9c, 0x04, + 0x44, 0xbf, 0xd0, 0x89, 0xb6, 0x76, 0x1e, 0x27, 0x66, 0x50, 0x1d, 0xe6, 0x39, 0x75, 0xdb, 0xc4, + 0x5c, 0x98, 0xc1, 0x85, 0x8a, 0x55, 0x24, 0xb6, 0xc8, 0x0b, 0x02, 0xd2, 0x31, 0x17, 0x55, 0xa5, + 0xa4, 0x28, 0xf4, 0x03, 0x58, 0x0c, 0xfa, 
0x3d, 0xd9, 0x2c, 0x74, 0xf6, 0x39, 0x89, 0x98, 0x79, + 0x41, 0x7e, 0x6f, 0x62, 0x16, 0x5d, 0x83, 0x85, 0xa0, 0xdf, 0x3b, 0x10, 0x37, 0xbc, 0x62, 0x5b, + 0x92, 0x6c, 0xe3, 0x93, 0xe8, 0x26, 0x2c, 0x0b, 0x5c, 0xbc, 0xdb, 0x8a, 0x73, 0x59, 0x72, 0xa6, + 0x17, 0xde, 0x43, 0xcf, 0xfc, 0x3e, 0x3a, 0x02, 0xeb, 0x39, 0x54, 0x93, 0xfb, 0x90, 0x81, 0xbd, + 0x33, 0xde, 0x71, 0xcf, 0x10, 0x17, 0x89, 0x86, 0xe3, 0x39, 0x7c, 0xef, 0x69, 0xd4, 0x71, 0x39, + 0xc9, 0xca, 0xbc, 0xe9, 0x0c, 0x74, 0x19, 0x0a, 0x7b, 0x6a, 0xa3, 0xd4, 0xcb, 0xa5, 0xa6, 0xc4, + 0x7c, 0x93, 0x08, 0xe7, 0xe9, 0x74, 0xab, 0x29, 0xfb, 0x0a, 0x58, 0x59, 0xe2, 0x95, 0x33, 0xec, + 0x3f, 0xe7, 0x00, 0x46, 0xc1, 0x80, 0x3e, 0x02, 0xe8, 0x91, 0x8e, 0xe7, 0xfe, 0x9a, 0x8f, 0x1a, + 0xca, 0xb2, 0x9c, 0x91, 0x5d, 0xe5, 0xa8, 0xf4, 0xcf, 0xbd, 0x73, 0xe9, 0x8f, 0x60, 0x8e, 0x79, + 0xdf, 0x10, 0x5d, 0xa6, 0xc8, 0x31, 0x7a, 0x02, 0x15, 0x37, 0x08, 0x42, 0x2e, 0xc3, 0x38, 0x6e, + 0xb6, 0x6f, 0x9d, 0x16, 0xbe, 0xce, 0xd6, 0x88, 0x5f, 0x9d, 0x92, 0xa4, 0x04, 0xeb, 0x3e, 0x2c, + 0x4d, 0x32, 0x9c, 0xab, 0x19, 0xfc, 0xd6, 0x80, 0x0b, 0x13, 0x5b, 0x87, 0x3e, 0x1d, 0x66, 0x01, + 0x63, 0x86, 0xe3, 0x15, 0x27, 0x80, 0x07, 0x50, 0xdd, 0xe2, 0x5c, 0x64, 0x3d, 0x65, 0x9b, 0x6a, + 0xf7, 0x4e, 0xc7, 0x8e, 0x21, 0xec, 0x3f, 0x1a, 0xa3, 0x77, 0xce, 0xcc, 0x9e, 0xff, 0xde, 0x78, + 0xcf, 0x7f, 0x7d, 0xfa, 0xe5, 0xf0, 0x3e, 0x5b, 0xfd, 0x1b, 0x3f, 0x87, 0x0f, 0x33, 0x2f, 0x66, + 0x54, 0x81, 0xe2, 0xfe, 0xc1, 0x16, 0x3e, 0x68, 0x35, 0x97, 0x3e, 0x40, 0x55, 0x28, 0x6d, 0x3f, + 0xd9, 0xdd, 0x7b, 0xdc, 0x3a, 0x68, 0x2d, 0x19, 0x62, 0xa9, 0xd9, 0x12, 0xe3, 0xe6, 0x52, 0xae, + 0xfe, 0x6d, 0x01, 0x8a, 0xdb, 0xea, 0xbf, 0x1e, 0x74, 0x00, 0xe5, 0xe1, 0x9f, 0x00, 0xc8, 0xce, + 0xf0, 0xce, 0xc4, 0xbf, 0x09, 0xd6, 0xc7, 0xa7, 0xf2, 0xe8, 0xc4, 0xfd, 0x10, 0xe6, 0xe5, 0xdf, + 0x21, 0x28, 0xa3, 0xbd, 0x4e, 0xfe, 0x4f, 0x62, 0x9d, 0xfe, 0xf7, 0xc2, 0x86, 0x21, 0x24, 0xc9, + 0xb7, 0x89, 0x2c, 0x49, 0xc9, 0xc7, 0x4b, 0x6b, 0xf5, 0x8c, 0x47, 0x0d, 0xb4, 0x0b, 0x05, 0xdd, + 0xb0, 0x65, 0xb1, 0x26, 0x5f, 0x20, 0xac, 0xb5, 0xe9, 0x0c, 0x4a, 0xd8, 0x86, 0x81, 0x76, 0x87, + 0xef, 0xd1, 0x59, 0xaa, 0x25, 0xab, 0x5d, 0xeb, 0x8c, 0xf5, 0x75, 0x63, 0xc3, 0x40, 0x5f, 0x41, + 0x25, 0x51, 0xcf, 0xa2, 0x8c, 0x6a, 0x2a, 0x5d, 0x1c, 0x5b, 0xd7, 0xcf, 0xe0, 0xd2, 0x96, 0xb7, + 0x60, 0x4e, 0x1e, 0xa4, 0x0c, 0x67, 0x27, 0xca, 0xdd, 0x2c, 0x35, 0xc7, 0xca, 0xdf, 0x43, 0x55, + 0xa0, 0x93, 0x20, 0x19, 0x7d, 0xe8, 0xfa, 0x59, 0xf7, 0xea, 0xd4, 0xb0, 0x49, 0x05, 0xf1, 0x86, + 0x81, 0x42, 0x40, 0xe9, 0xe4, 0x89, 0x7e, 0x94, 0x11, 0x25, 0xd3, 0x32, 0xb8, 0x75, 0x73, 0x36, + 0x66, 0x65, 0x54, 0xa3, 0xfa, 0xea, 0xcd, 0x8a, 0xf1, 0x8f, 0x37, 0x2b, 0xc6, 0xbf, 0xde, 0xac, + 0x18, 0x87, 0x05, 0x59, 0x31, 0xfd, 0xf8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7c, 0xb8, 0xc3, + 0x68, 0x0b, 0x1d, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
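
Aside on the hex block above: this is the gzip-compressed FileDescriptorProto for control.proto that gogo/protobuf embeds in the generated file and registers at package init, so it is regenerated wholesale whenever the .proto changes; none of it is hand-edited. A minimal sketch of decompressing such a blob to check which messages the regenerated descriptor declares — the `descriptorBytes` placeholder and the exact gogo import paths here are assumptions for illustration, not part of the patch:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"log"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// dumpDescriptor gunzips a generated fileDescriptor blob and lists the
// message types it declares.
func dumpDescriptor(gz []byte) error {
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return err
	}
	defer zr.Close()
	raw, err := io.ReadAll(zr)
	if err != nil {
		return err
	}
	var fd descriptor.FileDescriptorProto
	if err := proto.Unmarshal(raw, &fd); err != nil {
		return err
	}
	fmt.Println("file:", fd.GetName()) // expected: control.proto
	for _, msg := range fd.GetMessageType() {
		fmt.Println("  message:", msg.GetName())
	}
	return nil
}

func main() {
	// descriptorBytes stands in for the generated blob above; a real
	// caller would reference the generated variable from the package.
	var descriptorBytes []byte
	if err := dumpDescriptor(descriptorBytes); err != nil {
		log.Fatal(err)
	}
}
```

Run against the new blob, this should list the added messages (BuildHistoryRequest, BuildHistoryEvent, BuildHistoryRecord, InfoRequest/InfoResponse, and so on) alongside the pre-existing ones.
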
@@ -1387,6 +2149,9 @@ type ControlClient interface { Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) + Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) + ListenBuildHistory(ctx context.Context, in *BuildHistoryRequest, opts ...grpc.CallOption) (Control_ListenBuildHistoryClient, error) + UpdateBuildHistory(ctx context.Context, in *UpdateBuildHistoryRequest, opts ...grpc.CallOption) (*UpdateBuildHistoryResponse, error) } type controlClient struct { @@ -1519,6 +2284,56 @@ func (c *controlClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, return out, nil } +func (c *controlClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { + out := new(InfoResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListenBuildHistory(ctx context.Context, in *BuildHistoryRequest, opts ...grpc.CallOption) (Control_ListenBuildHistoryClient, error) { + stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[3], "/moby.buildkit.v1.Control/ListenBuildHistory", opts...) + if err != nil { + return nil, err + } + x := &controlListenBuildHistoryClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Control_ListenBuildHistoryClient interface { + Recv() (*BuildHistoryEvent, error) + grpc.ClientStream +} + +type controlListenBuildHistoryClient struct { + grpc.ClientStream +} + +func (x *controlListenBuildHistoryClient) Recv() (*BuildHistoryEvent, error) { + m := new(BuildHistoryEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *controlClient) UpdateBuildHistory(ctx context.Context, in *UpdateBuildHistoryRequest, opts ...grpc.CallOption) (*UpdateBuildHistoryResponse, error) { + out := new(UpdateBuildHistoryResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/UpdateBuildHistory", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ControlServer is the server API for Control service. type ControlServer interface { DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error) @@ -1527,6 +2342,9 @@ type ControlServer interface { Status(*StatusRequest, Control_StatusServer) error Session(Control_SessionServer) error ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) + Info(context.Context, *InfoRequest) (*InfoResponse, error) + ListenBuildHistory(*BuildHistoryRequest, Control_ListenBuildHistoryServer) error + UpdateBuildHistory(context.Context, *UpdateBuildHistoryRequest) (*UpdateBuildHistoryResponse, error) } // UnimplementedControlServer can be embedded to have forward compatible implementations. 
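
The hunk above is where the new API surface lands: ControlClient gains Info, ListenBuildHistory and UpdateBuildHistory, with ListenBuildHistory generated as a server-streaming call. A minimal sketch of driving these generated methods directly, assuming a local buildkitd socket and insecure transport purely for illustration (moby itself wires the client up through its own transport, not a plain dial):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"log"

	controlapi "github.com/moby/buildkit/api/services/control"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial target is an assumption for illustration.
	conn, err := grpc.Dial("unix:///run/buildkit/buildkitd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	c := controlapi.NewControlClient(conn)

	// Info is a plain unary call reporting the daemon's BuildKit version.
	if info, err := c.Info(context.Background(), &controlapi.InfoRequest{}); err == nil {
		fmt.Println("buildkit:", info.BuildkitVersion.GetVersion())
	}

	// ListenBuildHistory streams BuildHistoryEvents; EarlyExit asks the
	// server to close the stream after replaying the stored records
	// instead of blocking for future events.
	stream, err := c.ListenBuildHistory(context.Background(),
		&controlapi.BuildHistoryRequest{EarlyExit: true})
	if err != nil {
		log.Fatal(err)
	}
	for {
		ev, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		if ev.Record != nil {
			fmt.Println(ev.Type, ev.Record.Ref)
		}
	}
}
```

UpdateBuildHistoryRequest's Pinned and Delete fields (marshalled further down) give a client the corresponding write path: exempt a history record from pruning, or drop it explicitly. Note also that the tag bytes in the marshalling hunks below all follow the protobuf rule tag = (field_number << 3) | wire_type: SolveRequest.Internal is field 11 with varint wire type 0, hence 0x58, and SourcePolicy is field 12 with length-delimited wire type 2, hence 0x62.
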
@@ -1551,6 +2369,15 @@ func (*UnimplementedControlServer) Session(srv Control_SessionServer) error { func (*UnimplementedControlServer) ListWorkers(ctx context.Context, req *ListWorkersRequest) (*ListWorkersResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListWorkers not implemented") } +func (*UnimplementedControlServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (*UnimplementedControlServer) ListenBuildHistory(req *BuildHistoryRequest, srv Control_ListenBuildHistoryServer) error { + return status.Errorf(codes.Unimplemented, "method ListenBuildHistory not implemented") +} +func (*UnimplementedControlServer) UpdateBuildHistory(ctx context.Context, req *UpdateBuildHistoryRequest) (*UpdateBuildHistoryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateBuildHistory not implemented") +} func RegisterControlServer(s *grpc.Server, srv ControlServer) { s.RegisterService(&_Control_serviceDesc, srv) @@ -1678,6 +2505,63 @@ func _Control_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } +func _Control_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).Info(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListenBuildHistory_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(BuildHistoryRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ControlServer).ListenBuildHistory(m, &controlListenBuildHistoryServer{stream}) +} + +type Control_ListenBuildHistoryServer interface { + Send(*BuildHistoryEvent) error + grpc.ServerStream +} + +type controlListenBuildHistoryServer struct { + grpc.ServerStream +} + +func (x *controlListenBuildHistoryServer) Send(m *BuildHistoryEvent) error { + return x.ServerStream.SendMsg(m) +} + +func _Control_UpdateBuildHistory_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateBuildHistoryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateBuildHistory(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.Control/UpdateBuildHistory", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateBuildHistory(ctx, req.(*UpdateBuildHistoryRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Control_serviceDesc = grpc.ServiceDesc{ ServiceName: "moby.buildkit.v1.Control", HandlerType: (*ControlServer)(nil), @@ -1694,6 +2578,14 @@ var _Control_serviceDesc = grpc.ServiceDesc{ MethodName: "ListWorkers", Handler: _Control_ListWorkers_Handler, }, + { + MethodName: "Info", + Handler: _Control_Info_Handler, + }, + { + MethodName: "UpdateBuildHistory", + Handler: _Control_UpdateBuildHistory_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ 
-1712,6 +2604,11 @@ var _Control_serviceDesc = grpc.ServiceDesc{ ServerStreams: true, ClientStreams: true, }, + { + StreamName: "ListenBuildHistory", + Handler: _Control_ListenBuildHistory_Handler, + ServerStreams: true, + }, }, Metadata: "control.proto", } @@ -1995,6 +2892,28 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.SourcePolicy != nil { + { + size, err := m.SourcePolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.Internal { + i-- + if m.Internal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } if len(m.FrontendInputs) > 0 { for k := range m.FrontendInputs { v := m.FrontendInputs[k] @@ -2471,23 +3390,23 @@ func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x3a } if m.Completed != nil { - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) - if err7 != nil { - return 0, err7 - } - i -= n7 - i = encodeVarintControl(dAtA, i, uint64(n7)) - i-- - dAtA[i] = 0x32 - } - if m.Started != nil { - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) if err8 != nil { return 0, err8 } i -= n8 i = encodeVarintControl(dAtA, i, uint64(n8)) i-- + dAtA[i] = 0x32 + } + if m.Started != nil { + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintControl(dAtA, i, uint64(n9)) + i-- dAtA[i] = 0x2a } if m.Cached { @@ -2551,31 +3470,31 @@ func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.XXX_unrecognized) } if m.Completed != nil { - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) - if err9 != nil { - return 0, err9 - } - i -= n9 - i = encodeVarintControl(dAtA, i, uint64(n9)) - i-- - dAtA[i] = 0x42 - } - if m.Started != nil { - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) if err10 != nil { return 0, err10 } i -= n10 i = encodeVarintControl(dAtA, i, uint64(n10)) i-- + dAtA[i] = 0x42 + } + if m.Started != nil { + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintControl(dAtA, i, uint64(n11)) + i-- dAtA[i] = 0x3a } - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err11 != nil { - return 0, err11 + n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err12 != nil { + return 0, err12 } - i -= n11 - i = encodeVarintControl(dAtA, i, uint64(n11)) + i -= n12 + i 
= encodeVarintControl(dAtA, i, uint64(n12)) i-- dAtA[i] = 0x32 if m.Total != 0 { @@ -2648,12 +3567,12 @@ func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x18 } - n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err12 != nil { - return 0, err12 + n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err13 != nil { + return 0, err13 } - i -= n12 - i = encodeVarintControl(dAtA, i, uint64(n12)) + i -= n13 + i = encodeVarintControl(dAtA, i, uint64(n13)) i-- dAtA[i] = 0x12 if len(m.Vertex) > 0 { @@ -2865,6 +3784,643 @@ func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *InfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *InfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.BuildkitVersion != nil { + { + size, err := m.BuildkitVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildHistoryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildHistoryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildHistoryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.EarlyExit { + i-- + if m.EarlyExit { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0x12 + } + if m.ActiveOnly { + i-- + if m.ActiveOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BuildHistoryEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*BuildHistoryEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildHistoryEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Record != nil { + { + size, err := m.Record.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *BuildHistoryRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildHistoryRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildHistoryRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.NumCompletedSteps != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.NumCompletedSteps)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if m.NumTotalSteps != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.NumTotalSteps)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.NumCachedSteps != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.NumCachedSteps)) + i-- + dAtA[i] = 0x78 + } + if m.Pinned { + i-- + if m.Pinned { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if m.Trace != nil { + { + size, err := m.Trace.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if m.Generation != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x60 + } + if len(m.Results) > 0 { + for k := range m.Results { + v := m.Results[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if len(m.ExporterResponse) > 0 { + for k := range m.ExporterResponse { + v := m.ExporterResponse[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x4a + } + } + if m.Logs != nil { + { + size, err := m.Logs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.CompletedAt != nil { + n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CompletedAt, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CompletedAt):]) + if err21 != nil { + return 0, err21 + } + i -= n21 + i = encodeVarintControl(dAtA, i, uint64(n21)) + i-- + dAtA[i] = 0x3a + } + if m.CreatedAt != nil { + n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CreatedAt):]) + if err22 != nil { + return 0, err22 + } + i -= n22 + i = encodeVarintControl(dAtA, i, uint64(n22)) + i-- + dAtA[i] = 0x32 + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Exporters) > 0 { + for iNdEx := len(m.Exporters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exporters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.FrontendAttrs) > 0 { + for k := range m.FrontendAttrs { + v := m.FrontendAttrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Frontend) > 0 { + i -= len(m.Frontend) + copy(dAtA[i:], m.Frontend) + i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend))) + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateBuildHistoryRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateBuildHistoryRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateBuildHistoryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Delete { + i-- + if m.Delete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Pinned { + i-- + if m.Pinned { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateBuildHistoryResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateBuildHistoryResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateBuildHistoryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *Descriptor) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Descriptor) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.Size_ != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x18 + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintControl(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0x12 + } + if len(m.MediaType) > 0 { + i -= len(m.MediaType) + copy(dAtA[i:], m.MediaType) + i = encodeVarintControl(dAtA, i, uint64(len(m.MediaType))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BuildResultInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildResultInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildResultInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Attestations) > 0 { + for iNdEx := len(m.Attestations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attestations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Exporter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exporter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exporter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Attrs) > 0 { + for k := range m.Attrs { + v := m.Attrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintControl(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintControl(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) 
- i, nil +} + func encodeVarintControl(dAtA []byte, offset int, v uint64) int { offset -= sovControl(v) base := offset @@ -3057,6 +4613,13 @@ func (m *SolveRequest) Size() (n int) { n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) } } + if m.Internal { + n += 2 + } + if m.SourcePolicy != nil { + l = m.SourcePolicy.Size() + n += 1 + l + sovControl(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3406,6 +4969,280 @@ func (m *ListWorkersResponse) Size() (n int) { return n } +func (m *InfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BuildkitVersion != nil { + l = m.BuildkitVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BuildHistoryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ActiveOnly { + n += 2 + } + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.EarlyExit { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BuildHistoryEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovControl(uint64(m.Type)) + } + if m.Record != nil { + l = m.Record.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BuildHistoryRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Frontend) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.FrontendAttrs) > 0 { + for k, v := range m.FrontendAttrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.Exporters) > 0 { + for _, e := range m.Exporters { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.CreatedAt != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.CreatedAt) + n += 1 + l + sovControl(uint64(l)) + } + if m.CompletedAt != nil { + l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.CompletedAt) + n += 1 + l + sovControl(uint64(l)) + } + if m.Logs != nil { + l = m.Logs.Size() + n += 1 + l + sovControl(uint64(l)) + } + if len(m.ExporterResponse) > 0 { + for k, v := range m.ExporterResponse { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Results) > 0 { + for k, v := range m.Results { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovControl(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + l + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.Generation != 0 { + n += 1 + sovControl(uint64(m.Generation)) + } + if m.Trace != nil { + l = m.Trace.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Pinned { + n += 2 + } + if m.NumCachedSteps != 0 { + n += 1 + 
sovControl(uint64(m.NumCachedSteps)) + } + if m.NumTotalSteps != 0 { + n += 2 + sovControl(uint64(m.NumTotalSteps)) + } + if m.NumCompletedSteps != 0 { + n += 2 + sovControl(uint64(m.NumCompletedSteps)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateBuildHistoryRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Pinned { + n += 2 + } + if m.Delete { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *UpdateBuildHistoryResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Descriptor) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MediaType) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Size_ != 0 { + n += 1 + sovControl(uint64(m.Size_)) + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BuildResultInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Attestations) > 0 { + for _, e := range m.Attestations { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Exporter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovControl(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -4740,6 +6577,62 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } m.FrontendInputs[mapkey] = mapvalue iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourcePolicy == nil { + m.SourcePolicy = &pb1.Policy{} + } + if err 
:= m.SourcePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipControl(dAtA[iNdEx:]) @@ -7019,6 +8912,1983 @@ func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *InfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildkitVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BuildkitVersion == nil { + m.BuildkitVersion = &types.BuildkitVersion{} + } + if err := m.BuildkitVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildHistoryRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildHistoryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildHistoryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveOnly = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EarlyExit", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EarlyExit = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildHistoryEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildHistoryEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildHistoryEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= BuildHistoryEventType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Record == nil { + m.Record = &BuildHistoryRecord{} + } + if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildHistoryRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildHistoryRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildHistoryRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Frontend = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendAttrs == nil { + m.FrontendAttrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey 
< 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.FrontendAttrs[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exporters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exporters = append(m.Exporters, &Exporter{}) + if err := m.Exporters[len(m.Exporters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &rpc.Status{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedAt == nil { + m.CreatedAt = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.CreatedAt, 
dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletedAt == nil { + m.CompletedAt = new(time.Time) + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.CompletedAt, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Logs == nil { + m.Logs = &Descriptor{} + } + if err := m.Logs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExporterResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExporterResponse == nil { + m.ExporterResponse = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.ExporterResponse[mapkey] = mapvalue + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &BuildResultInfo{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Results == nil { + m.Results = make(map[string]*BuildResultInfo) + } + var mapkey string + var mapvalue *BuildResultInfo + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
mapmsglen < 0 { + return ErrInvalidLengthControl + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthControl + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BuildResultInfo{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Results[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Trace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Trace == nil { + m.Trace = &Descriptor{} + } + if err := m.Trace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pinned", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Pinned = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumCachedSteps", wireType) + } + m.NumCachedSteps = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumCachedSteps |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumTotalSteps", wireType) + } + m.NumTotalSteps = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumTotalSteps |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumCompletedSteps", wireType) + } + m.NumCompletedSteps = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumCompletedSteps |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateBuildHistoryRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateBuildHistoryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateBuildHistoryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pinned", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Pinned = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Delete = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
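The decode loops repeated throughout these generated Unmarshal methods — shift += 7, b&0x7F, b < 0x80 — are all inline expansions of protobuf base-128 varint decoding; the gogo generator unrolls them instead of calling a helper such as binary.Uvarint. A minimal standalone equivalent, for reference only (names are illustrative, not part of this diff):

package main

import (
	"errors"
	"fmt"
)

// decodeUvarint reads one base-128 varint: each byte carries seven
// payload bits, least-significant group first; a clear high bit marks
// the final byte. At most ten bytes encode a 64-bit value.
func decodeUvarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF above
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits") // ErrIntOverflow* above
}

func main() {
	v, n, _ := decodeUvarint([]byte{0xAC, 0x02}) // 0xAC 0x02 encodes 300
	fmt.Println(v, n)                            // prints: 300 2
}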
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateBuildHistoryResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateBuildHistoryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateBuildHistoryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Descriptor) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Descriptor: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
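Proto3 has no dedicated wire format for maps: the map<string, string> Annotations field decoded above arrives as a repeated embedded message whose field 1 is the key and field 2 the value, which is exactly what the hand-rolled mapkey/mapvalue loop walks. A sketch of the matching encoder for a single entry (illustrative only, not taken from this diff):

package main

import "fmt"

// appendUvarint appends v in base-128 varint form.
func appendUvarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

// appendMapEntry encodes one map<string,string> entry for field num as an
// embedded message: key at field 1, value at field 2, both length-delimited.
func appendMapEntry(b []byte, num int, k, v string) []byte {
	var entry []byte
	entry = append(entry, 0x0A) // tag: field 1, wire type 2 (key)
	entry = appendUvarint(entry, uint64(len(k)))
	entry = append(entry, k...)
	entry = append(entry, 0x12) // tag: field 2, wire type 2 (value)
	entry = appendUvarint(entry, uint64(len(v)))
	entry = append(entry, v...)

	b = appendUvarint(b, uint64(num)<<3|2) // outer tag, wire type 2
	b = appendUvarint(b, uint64(len(entry)))
	return append(b, entry...)
}

func main() {
	// One Annotations entry (field 5 of the Descriptor message above).
	fmt.Printf("% x\n", appendMapEntry(nil, 5, "k", "v")) // 2a 06 0a 01 6b 12 01 76
}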
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildResultInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildResultInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildResultInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &Descriptor{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attestations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attestations = append(m.Attestations, &Descriptor{}) + if err := m.Attestations[len(m.Attestations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exporter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exporter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exporter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return 
ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthControl + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipControl(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.proto b/vendor/github.com/moby/buildkit/api/services/control/control.proto index a468a293af..327c9eeaf4 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/control.proto +++ b/vendor/github.com/moby/buildkit/api/services/control/control.proto @@ -6,6 +6,9 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "github.com/moby/buildkit/solver/pb/ops.proto"; import "github.com/moby/buildkit/api/types/worker.proto"; +// import "github.com/containerd/containerd/api/types/descriptor.proto"; +import "github.com/gogo/googleapis/google/rpc/status.proto"; +import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto"; option (gogoproto.sizer_all) = true; option (gogoproto.marshaler_all) = true; @@ -18,7 +21,10 @@ service Control { rpc Status(StatusRequest) returns (stream StatusResponse); rpc Session(stream BytesMessage) returns (stream BytesMessage); rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse); - // rpc Info(InfoRequest) returns (InfoResponse); + rpc Info(InfoRequest) returns (InfoResponse); + + rpc ListenBuildHistory(BuildHistoryRequest) returns (stream BuildHistoryEvent); + rpc UpdateBuildHistory(UpdateBuildHistoryRequest) returns (UpdateBuildHistoryResponse); } message PruneRequest { @@ -62,6 +68,8 @@ message SolveRequest { CacheOptions Cache = 8 [(gogoproto.nullable) = false]; repeated string Entitlements = 9 [(gogoproto.customtype) = "github.com/moby/buildkit/util/entitlements.Entitlement" ]; map<string, pb.Definition> FrontendInputs = 10; + bool Internal = 11; // Internal builds are not recorded in build history + moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 12; } message CacheOptions { @@ -157,3 +165,73 @@ message ListWorkersRequest { message ListWorkersResponse { repeated moby.buildkit.v1.types.WorkerRecord record = 1; } + +message InfoRequest {} + +message InfoResponse { + moby.buildkit.v1.types.BuildkitVersion buildkitVersion = 1; +} + +message BuildHistoryRequest { + bool ActiveOnly = 1; + string Ref = 2; + bool EarlyExit = 3; +} + +enum BuildHistoryEventType { + STARTED = 0; + COMPLETE = 1; + DELETED = 2; +} + +message BuildHistoryEvent { + BuildHistoryEventType type = 1; + BuildHistoryRecord record = 2; +} + +message BuildHistoryRecord { + string Ref = 1; + string Frontend = 2; + map<string, string> FrontendAttrs = 3; + 
repeated Exporter Exporters = 4; + google.rpc.Status error = 5; + google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp CompletedAt = 7 [(gogoproto.stdtime) = true]; + Descriptor logs = 8; + map<string, string> ExporterResponse = 9; + BuildResultInfo Result = 10; + map<string, BuildResultInfo> Results = 11; + int32 Generation = 12; + Descriptor trace = 13; + bool pinned = 14; + int32 numCachedSteps = 15; + int32 numTotalSteps = 16; + int32 numCompletedSteps = 17; + // TODO: tags + // TODO: unclipped logs +} + +message UpdateBuildHistoryRequest { + string Ref = 1; + bool Pinned = 2; + bool Delete = 3; +} + +message UpdateBuildHistoryResponse {} + +message Descriptor { + string media_type = 1; + string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + int64 size = 3; + map<string, string> annotations = 5; +} + +message BuildResultInfo { + Descriptor Result = 1; + repeated Descriptor Attestations = 2; +} + +message Exporter { + string Type = 1; + map<string, string> Attrs = 2; +} diff --git a/vendor/github.com/moby/buildkit/api/types/worker.pb.go b/vendor/github.com/moby/buildkit/api/types/worker.pb.go index 54cbd605e1..e1b3928cba 100644 --- a/vendor/github.com/moby/buildkit/api/types/worker.pb.go +++ b/vendor/github.com/moby/buildkit/api/types/worker.pb.go @@ -29,6 +29,7 @@ type WorkerRecord struct { Labels map[string]string `protobuf:"bytes,2,rep,name=Labels,proto3" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms"` GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy,proto3" json:"GCPolicy,omitempty"` + BuildkitVersion *BuildkitVersion `protobuf:"bytes,5,opt,name=BuildkitVersion,proto3" json:"BuildkitVersion,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -95,6 +96,13 @@ func (m *WorkerRecord) GetGCPolicy() []*GCPolicy { return nil } +func (m *WorkerRecord) GetBuildkitVersion() *BuildkitVersion { + if m != nil { + return m.BuildkitVersion + } + return nil +} + type GCPolicy struct { All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"` KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` @@ -166,39 +174,106 @@ func (m *GCPolicy) GetFilters() []string { return nil } +type BuildkitVersion struct { + Package string `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BuildkitVersion) Reset() { *m = BuildkitVersion{} } +func (m *BuildkitVersion) String() string { return proto.CompactTextString(m) } +func (*BuildkitVersion) ProtoMessage() {} +func (*BuildkitVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_e4ff6184b07e587a, []int{2} +} +func (m *BuildkitVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildkitVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BuildkitVersion.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*BuildkitVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildkitVersion.Merge(m, src) +} +func (m *BuildkitVersion) XXX_Size() int { + return m.Size() +} +func (m *BuildkitVersion) XXX_DiscardUnknown() { + xxx_messageInfo_BuildkitVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildkitVersion proto.InternalMessageInfo + +func (m *BuildkitVersion) GetPackage() string { + if m != nil { + return m.Package + } + return "" +} + +func (m *BuildkitVersion) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *BuildkitVersion) GetRevision() string { + if m != nil { + return m.Revision + } + return "" +} + func init() { proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord") proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.types.WorkerRecord.LabelsEntry") proto.RegisterType((*GCPolicy)(nil), "moby.buildkit.v1.types.GCPolicy") + proto.RegisterType((*BuildkitVersion)(nil), "moby.buildkit.v1.types.BuildkitVersion") } func init() { proto.RegisterFile("worker.proto", fileDescriptor_e4ff6184b07e587a) } var fileDescriptor_e4ff6184b07e587a = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xea, 0x40, - 0x14, 0x86, 0x6f, 0x5b, 0x2e, 0x97, 0x0e, 0xcd, 0x8d, 0x99, 0x18, 0xd3, 0x10, 0x83, 0x84, 0x15, - 0x0b, 0x9d, 0xa2, 0x6e, 0xd4, 0xb8, 0x42, 0x8c, 0x92, 0xb8, 0x20, 0xb3, 0x71, 0xdd, 0x81, 0x01, - 0x9b, 0x0e, 0x9c, 0xc9, 0x74, 0x8a, 0xf6, 0x39, 0x7c, 0x29, 0x96, 0x3e, 0x81, 0x31, 0x3c, 0x89, - 0x99, 0x29, 0x08, 0x26, 0xba, 0x3b, 0xff, 0x9f, 0xff, 0xfb, 0xe7, 0x9c, 0x0c, 0x0a, 0x9e, 0x41, - 0xa5, 0x5c, 0x11, 0xa9, 0x40, 0x03, 0x3e, 0x98, 0x01, 0x2b, 0x08, 0xcb, 0x13, 0x31, 0x4e, 0x13, - 0x4d, 0x16, 0xa7, 0x44, 0x17, 0x92, 0x67, 0x8d, 0x93, 0x69, 0xa2, 0x9f, 0x72, 0x46, 0x46, 0x30, - 0x8b, 0xa6, 0x30, 0x85, 0xc8, 0xc6, 0x59, 0x3e, 0xb1, 0xca, 0x0a, 0x3b, 0x95, 0x35, 0x8d, 0xe3, - 0x9d, 0xb8, 0x69, 0x8c, 0x36, 0x8d, 0x51, 0x06, 0x62, 0xc1, 0x55, 0x24, 0x59, 0x04, 0x32, 0x2b, - 0xd3, 0xed, 0x57, 0x17, 0x05, 0x8f, 0x76, 0x0b, 0xca, 0x47, 0xa0, 0xc6, 0xf8, 0x3f, 0x72, 0x07, - 0xfd, 0xd0, 0x69, 0x39, 0x1d, 0x9f, 0xba, 0x83, 0x3e, 0xbe, 0x47, 0xd5, 0x87, 0x98, 0x71, 0x91, - 0x85, 0x6e, 0xcb, 0xeb, 0xd4, 0xcf, 0xba, 0xe4, 0xe7, 0x35, 0xc9, 0x6e, 0x0b, 0x29, 0x91, 0xdb, - 0xb9, 0x56, 0x05, 0x5d, 0xf3, 0xb8, 0x8b, 0x7c, 0x29, 0x62, 0x3d, 0x01, 0x35, 0xcb, 0x42, 0xcf, - 0x96, 0x05, 0x44, 0x32, 0x32, 0x5c, 0x9b, 0xbd, 0xca, 0xf2, 0xfd, 0xe8, 0x0f, 0xdd, 0x86, 0xf0, - 0x35, 0xaa, 0xdd, 0xdd, 0x0c, 0x41, 0x24, 0xa3, 0x22, 0xac, 0x58, 0xa0, 0xf5, 0xdb, 0xeb, 0x9b, - 0x1c, 0xfd, 0x22, 0x1a, 0x97, 0xa8, 0xbe, 0xb3, 0x06, 0xde, 0x43, 0x5e, 0xca, 0x8b, 0xf5, 0x65, - 0x66, 0xc4, 0xfb, 0xe8, 0xef, 0x22, 0x16, 0x39, 0x0f, 0x5d, 0xeb, 0x95, 0xe2, 0xca, 0xbd, 0x70, - 0xda, 0x2f, 0xdb, 0x87, 0x0d, 0x17, 0x0b, 0x61, 0xb9, 0x1a, 0x35, 0x23, 0x6e, 0xa3, 0x20, 0xe5, - 0x5c, 0xf6, 0x73, 0x15, 0xeb, 0x04, 0xe6, 0x16, 0xf7, 0xe8, 0x37, 0x0f, 0x1f, 0x22, 0xdf, 0xe8, - 0x5e, 0xa1, 0xb9, 0x39, 0xd6, 0x04, 0xb6, 0x06, 0x0e, 0xd1, 0xbf, 0x49, 0x22, 0x34, 0x57, 0x99, - 0xbd, 0xcb, 0xa7, 0x1b, 0xd9, 0x0b, 0x96, 0xab, 0xa6, 0xf3, 0xb6, 0x6a, 0x3a, 0x1f, 0xab, 0xa6, - 0xc3, 0xaa, 0xf6, 0x93, 0xce, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x79, 0x52, 0x6a, 0x29, - 0x02, 0x00, 0x00, + // 416 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xc1, 0x8e, 0xd3, 0x30, + 0x10, 0x25, 0xc9, 0xee, 0xd2, 0xb8, 0x11, 
0x20, 0x0b, 0xa1, 0x28, 0x42, 0x25, 0xca, 0x85, 0x1e, + 0xc0, 0x59, 0x96, 0x0b, 0x20, 0x4e, 0xa1, 0x08, 0x56, 0xe2, 0xb0, 0xf8, 0x00, 0x67, 0x3b, 0xeb, + 0x86, 0x28, 0xee, 0xda, 0x72, 0x9c, 0x40, 0xfe, 0xb0, 0x47, 0xbe, 0x00, 0xa1, 0x1e, 0xf8, 0x0e, + 0x64, 0x27, 0x69, 0x4b, 0xd9, 0xde, 0xe6, 0xcd, 0xbc, 0xf7, 0x3c, 0xf3, 0x64, 0x10, 0x7c, 0x17, + 0xaa, 0x62, 0x0a, 0x49, 0x25, 0xb4, 0x80, 0x8f, 0x56, 0x82, 0x76, 0x88, 0x36, 0x25, 0xbf, 0xae, + 0x4a, 0x8d, 0xda, 0x17, 0x48, 0x77, 0x92, 0xd5, 0xd1, 0xf3, 0xa2, 0xd4, 0xdf, 0x1a, 0x8a, 0x72, + 0xb1, 0x4a, 0x0b, 0x51, 0x88, 0xd4, 0xd2, 0x69, 0xb3, 0xb4, 0xc8, 0x02, 0x5b, 0xf5, 0x36, 0xd1, + 0xb3, 0x3d, 0xba, 0x71, 0x4c, 0x47, 0xc7, 0xb4, 0x16, 0xbc, 0x65, 0x2a, 0x95, 0x34, 0x15, 0xb2, + 0xee, 0xd9, 0xc9, 0x1f, 0x17, 0x04, 0x5f, 0xed, 0x16, 0x98, 0xe5, 0x42, 0x5d, 0xc3, 0x7b, 0xc0, + 0xbd, 0x5c, 0x84, 0x4e, 0xec, 0xcc, 0x7d, 0xec, 0x5e, 0x2e, 0xe0, 0x47, 0x70, 0xf6, 0x89, 0x50, + 0xc6, 0xeb, 0xd0, 0x8d, 0xbd, 0xf9, 0xf4, 0xe2, 0x1c, 0xdd, 0xbe, 0x26, 0xda, 0x77, 0x41, 0xbd, + 0xe4, 0xfd, 0x8d, 0x56, 0x1d, 0x1e, 0xf4, 0xf0, 0x1c, 0xf8, 0x92, 0x13, 0xbd, 0x14, 0x6a, 0x55, + 0x87, 0x9e, 0x35, 0x0b, 0x90, 0xa4, 0xe8, 0x6a, 0x68, 0x66, 0x27, 0xeb, 0x5f, 0x4f, 0xee, 0xe0, + 0x1d, 0x09, 0xbe, 0x05, 0x93, 0x0f, 0xef, 0xae, 0x04, 0x2f, 0xf3, 0x2e, 0x3c, 0xb1, 0x82, 0xf8, + 0xd8, 0xeb, 0x23, 0x0f, 0x6f, 0x15, 0xf0, 0x33, 0xb8, 0x9f, 0x0d, 0xbc, 0x2f, 0x4c, 0xd5, 0xa5, + 0xb8, 0x09, 0x4f, 0x63, 0x67, 0x3e, 0xbd, 0x78, 0x7a, 0xcc, 0xe4, 0x80, 0x8e, 0x0f, 0xf5, 0xd1, + 0x6b, 0x30, 0xdd, 0xbb, 0x0c, 0x3e, 0x00, 0x5e, 0xc5, 0xba, 0x21, 0x2c, 0x53, 0xc2, 0x87, 0xe0, + 0xb4, 0x25, 0xbc, 0x61, 0xa1, 0x6b, 0x7b, 0x3d, 0x78, 0xe3, 0xbe, 0x72, 0x92, 0x1f, 0xbb, 0x5b, + 0x8c, 0x8e, 0x70, 0x6e, 0x75, 0x13, 0x6c, 0x4a, 0x98, 0x80, 0xa0, 0x62, 0x4c, 0x2e, 0x1a, 0x45, + 0xb4, 0x59, 0xd4, 0xc8, 0x3d, 0xfc, 0x4f, 0x0f, 0x3e, 0x06, 0xbe, 0xc1, 0x59, 0xa7, 0x99, 0xc9, + 0xcf, 0x10, 0x76, 0x0d, 0x18, 0x82, 0xbb, 0xcb, 0x92, 0x6b, 0xa6, 0x6a, 0x1b, 0x95, 0x8f, 0x47, + 0x98, 0x90, 0xff, 0x72, 0x30, 0x64, 0x49, 0xf2, 0x8a, 0x14, 0x6c, 0x58, 0x7e, 0x84, 0x66, 0xd2, + 0x0e, 0x61, 0xf5, 0x27, 0x8c, 0x10, 0x46, 0x60, 0xa2, 0x58, 0x5b, 0xda, 0x91, 0x67, 0x47, 0x5b, + 0x9c, 0x05, 0xeb, 0xcd, 0xcc, 0xf9, 0xb9, 0x99, 0x39, 0xbf, 0x37, 0x33, 0x87, 0x9e, 0xd9, 0xaf, + 0xf5, 0xf2, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x19, 0xcf, 0xd5, 0xdf, 0x02, 0x00, 0x00, } func (m *WorkerRecord) Marshal() (dAtA []byte, err error) { @@ -225,6 +300,18 @@ func (m *WorkerRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.BuildkitVersion != nil { + { + size, err := m.BuildkitVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWorker(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if len(m.GCPolicy) > 0 { for iNdEx := len(m.GCPolicy) - 1; iNdEx >= 0; iNdEx-- { { @@ -338,6 +425,54 @@ func (m *GCPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *BuildkitVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildkitVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildkitVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Revision) > 0 { + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = encodeVarintWorker(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x1a + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintWorker(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + } + if len(m.Package) > 0 { + i -= len(m.Package) + copy(dAtA[i:], m.Package) + i = encodeVarintWorker(dAtA, i, uint64(len(m.Package))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintWorker(dAtA []byte, offset int, v uint64) int { offset -= sovWorker(v) base := offset @@ -379,6 +514,10 @@ func (m *WorkerRecord) Size() (n int) { n += 1 + l + sovWorker(uint64(l)) } } + if m.BuildkitVersion != nil { + l = m.BuildkitVersion.Size() + n += 1 + l + sovWorker(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -412,6 +551,30 @@ func (m *GCPolicy) Size() (n int) { return n } +func (m *BuildkitVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Package) + if l > 0 { + n += 1 + l + sovWorker(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovWorker(uint64(l)) + } + l = len(m.Revision) + if l > 0 { + n += 1 + l + sovWorker(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovWorker(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -674,6 +837,42 @@ func (m *WorkerRecord) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildkitVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BuildkitVersion == nil { + m.BuildkitVersion = &BuildkitVersion{} + } + if err := m.BuildkitVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipWorker(dAtA[iNdEx:]) @@ -837,6 +1036,153 @@ func (m *GCPolicy) Unmarshal(dAtA []byte) error { } return nil } +func (m *BuildkitVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildkitVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildkitVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Package", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Package = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWorker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWorker + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWorker + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWorker(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWorker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipWorker(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/moby/buildkit/api/types/worker.proto b/vendor/github.com/moby/buildkit/api/types/worker.proto index 82dd7ad651..476fcc62e1 100644 --- a/vendor/github.com/moby/buildkit/api/types/worker.proto +++ b/vendor/github.com/moby/buildkit/api/types/worker.proto @@ -14,6 +14,7 @@ message WorkerRecord { map<string, string> Labels = 2; repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false]; repeated GCPolicy GCPolicy = 4; + BuildkitVersion BuildkitVersion = 5; } message GCPolicy { @@ -22,3 +23,9 @@ int64 keepBytes = 3; repeated string filters = 4; } + +message BuildkitVersion { + string package = 1; + string version = 2; + string revision = 3; +} diff --git a/vendor/github.com/moby/buildkit/cache/blobs.go b/vendor/github.com/moby/buildkit/cache/blobs.go index 8d2beefd06..716be90934 100644 --- a/vendor/github.com/moby/buildkit/cache/blobs.go +++ b/vendor/github.com/moby/buildkit/cache/blobs.go @@ -1,19 +1,15 @@ package cache import ( - "compress/gzip" "context" "fmt" - "io" "os" "strconv" - "github.com/containerd/containerd/content" "github.com/containerd/containerd/diff" "github.com/containerd/containerd/diff/walking" "github.com/containerd/containerd/leases" "github.com/containerd/containerd/mount" - "github.com/klauspost/compress/zstd" "github.com/moby/buildkit/session" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/flightcontrol" @@ -40,6 +36,14 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo if _, ok := leases.FromContext(ctx); !ok { return errors.Errorf("missing lease requirement for computeBlobChain") } + if !createIfNeeded { + sr.mu.Lock() + if sr.equalMutable != nil { + sr.mu.Unlock() + return nil + } + sr.mu.Unlock() + } if err := sr.Finalize(ctx); err != nil { return err @@ -57,8 +61,6 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo return computeBlobChain(ctx, sr, createIfNeeded, comp, s, filter) } -type compressor func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) - func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, comp compression.Config, s session.Group, filter map[string]struct{}) error { eg, ctx := errgroup.WithContext(ctx) switch sr.kind() { @@ -92,28 +94,8 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool return nil, errors.WithStack(ErrNoBlobs) } - var mediaType string - var compressorFunc compressor - var finalize func(context.Context, content.Store) (map[string]string, error) - switch comp.Type { - case compression.Uncompressed: - mediaType = ocispecs.MediaTypeImageLayer - case compression.Gzip: - compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) { - return gzipWriter(comp)(dest) - } - mediaType = ocispecs.MediaTypeImageLayerGzip - case compression.EStargz: - compressorFunc, finalize = compressEStargz(comp) - mediaType = ocispecs.MediaTypeImageLayerGzip - case compression.Zstd: - compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) { - return zstdWriter(comp)(dest) - } - mediaType = ocispecs.MediaTypeImageLayer + "+zstd" - default: - return nil, errors.Errorf("unknown layer compression type: %q", comp.Type) - } + compressorFunc, finalize := comp.Type.Compress(ctx, comp) + mediaType := comp.Type.MediaType() var lowerRef *immutableRef switch 
sr.kind() { @@ -206,7 +188,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool } } - if desc.Digest == "" && !isTypeWindows(sr) && (comp.Type == compression.Zstd || comp.Type == compression.EStargz) { + if desc.Digest == "" && !isTypeWindows(sr) && comp.Type.NeedsComputeDiffBySelf() { // These compression types aren't supported by the containerd differ, so try to compute the diff on the BuildKit side. // This case can happen on a containerd worker + non-overlayfs snapshotter (e.g. native). // See also: https://github.com/containerd/containerd/issues/4263 @@ -433,7 +415,7 @@ func isTypeWindows(sr *immutableRef) bool { // ensureCompression ensures the specified ref has the blob of the specified compression Type. func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error { - _, err := g.Do(ctx, fmt.Sprintf("%s-%d", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) { + _, err := g.Do(ctx, fmt.Sprintf("%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) { desc, err := ref.ociDesc(ctx, ref.descHandlers, true) if err != nil { return nil, err @@ -480,38 +462,3 @@ func ensureCompression(ctx context.Context, ref *immutableRef, comp compression. }) return err } - -func gzipWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) { - return func(dest io.Writer) (io.WriteCloser, error) { - level := gzip.DefaultCompression - if comp.Level != nil { - level = *comp.Level - } - return gzip.NewWriterLevel(dest, level) - } -} - -func zstdWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) { - return func(dest io.Writer) (io.WriteCloser, error) { - level := zstd.SpeedDefault - if comp.Level != nil { - level = toZstdEncoderLevel(*comp.Level) - } - return zstd.NewWriter(dest, zstd.WithEncoderLevel(level)) - } -} - -func toZstdEncoderLevel(level int) zstd.EncoderLevel { - // map zstd compression levels to go-zstd levels - // once we also have c based implementation move this to helper pkg - if level < 0 { - return zstd.SpeedDefault - } else if level < 3 { - return zstd.SpeedFastest - } else if level < 7 { - return zstd.SpeedDefault - } else if level < 9 { - return zstd.SpeedBetterCompression - } - return zstd.SpeedBestCompression -} diff --git a/vendor/github.com/moby/buildkit/cache/blobs_linux.go b/vendor/github.com/moby/buildkit/cache/blobs_linux.go index fcb8850a02..ce41275e6b 100644 --- a/vendor/github.com/moby/buildkit/cache/blobs_linux.go +++ b/vendor/github.com/moby/buildkit/cache/blobs_linux.go @@ -12,6 +12,7 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/overlay" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -24,7 +25,7 @@ var emptyDesc = ocispecs.Descriptor{} // diff between lower and upper snapshot. If the passed mounts cannot // be computed (e.g. because the mounts aren't overlayfs), it returns // an error. 
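The signature change just below replaces the package-local compressor type (deleted above) with shared function types from util/compression. Their declarations are not part of this diff; judging from the call sites here and in converter.go (comp.Type.Compress, c.decompress, c.finalize), they plausibly look like the following sketch:

package compression // sketch only; the real package is github.com/moby/buildkit/util/compression

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// Compressor wraps dest so that writes to the returned WriteCloser are
// compressed for mediaType; Close flushes whatever the codec buffers.
type Compressor func(dest io.Writer, mediaType string) (io.WriteCloser, error)

// Decompressor opens a decompressed stream over the blob behind desc.
type Decompressor func(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error)

// Finalizer runs after the compressed stream is fully written and returns
// extra labels for the content-store record (e.g. the eStargz TOC digest).
type Finalizer func(ctx context.Context, cs content.Store) (map[string]string, error)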
-func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) { +func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor) (_ ocispecs.Descriptor, ok bool, err error) { // Get upperdir location if mounts are overlayfs that can be processed by this differ. upperdir, err := overlay.GetUpperdir(lower, upper) if err != nil { @@ -57,11 +58,14 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper if err != nil { return emptyDesc, false, errors.Wrap(err, "failed to get compressed stream") } - err = overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower) - compressed.Close() - if err != nil { + // Close ensures compressorFunc performs its finalization work. + defer compressed.Close() + if err := overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower); err != nil { return emptyDesc, false, errors.Wrap(err, "failed to write compressed diff") } + if err := compressed.Close(); err != nil { + return emptyDesc, false, errors.Wrap(err, "failed to close compressed diff writer") + } if labels == nil { labels = map[string]string{} } diff --git a/vendor/github.com/moby/buildkit/cache/blobs_nolinux.go b/vendor/github.com/moby/buildkit/cache/blobs_nolinux.go index 2ccee770e2..1567768c19 100644 --- a/vendor/github.com/moby/buildkit/cache/blobs_nolinux.go +++ b/vendor/github.com/moby/buildkit/cache/blobs_nolinux.go @@ -6,11 +6,12 @@ package cache import ( "context" + "github.com/moby/buildkit/util/compression" "github.com/containerd/containerd/mount" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) { +func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor) (_ ocispecs.Descriptor, ok bool, err error) { return ocispecs.Descriptor{}, true, errors.Errorf("overlayfs-based diff computing is unsupported") } diff --git a/vendor/github.com/moby/buildkit/cache/compression.go b/vendor/github.com/moby/buildkit/cache/compression.go new file mode 100644 index 0000000000..bede8d9322 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/compression.go @@ -0,0 +1,16 @@ +//go:build !nydus +// +build !nydus + +package cache + +import ( + "context" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/cache/config" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool { + return refCfg.Compression.Force +} diff --git a/vendor/github.com/moby/buildkit/cache/compression_nydus.go b/vendor/github.com/moby/buildkit/cache/compression_nydus.go new file mode 100644 index 0000000000..48b61a4b36 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/compression_nydus.go @@ -0,0 +1,147 @@ +//go:build nydus +// +build nydus + +package cache + +import ( + "compress/gzip" + "context" + "encoding/json" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + 
"github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/compression" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" +) + +func init() { + additionalAnnotations = append( + additionalAnnotations, + nydusify.LayerAnnotationNydusBlob, nydusify.LayerAnnotationNydusBootstrap, nydusify.LayerAnnotationNydusBlobIDs, + ) +} + +// Nydus compression type can't be mixed with other compression types in the same image, +// so if `source` is this kind of layer, but the target is other compression type, we +// should do the forced compression. +func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool { + if refCfg.Compression.Force { + return true + } + isNydusBlob, _ := compression.Nydus.Is(ctx, cs, source) + if refCfg.Compression.Type == compression.Nydus { + return !isNydusBlob + } + return isNydusBlob +} + +// MergeNydus does two steps: +// 1. Extracts nydus bootstrap from nydus format (nydus blob + nydus bootstrap) for each layer. +// 2. Merge all nydus bootstraps into a final bootstrap (will as an extra layer). +// The nydus bootstrap size is very small, so the merge operation is fast. +func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, s session.Group) (*ocispecs.Descriptor, error) { + iref, ok := ref.(*immutableRef) + if !ok { + return nil, errors.Errorf("unsupported ref type %T", ref) + } + refs := iref.layerChain() + if len(refs) == 0 { + return nil, errors.Errorf("refs can't be empty") + } + + // Extracts nydus bootstrap from nydus format for each layer. + var cm *cacheManager + layers := []nydusify.Layer{} + blobIDs := []string{} + for _, ref := range refs { + blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s) + if err != nil { + return nil, errors.Wrapf(err, "get compression blob %q", comp.Type) + } + ra, err := ref.cm.ContentStore.ReaderAt(ctx, blobDesc) + if err != nil { + return nil, errors.Wrapf(err, "get reader for compression blob %q", comp.Type) + } + defer ra.Close() + if cm == nil { + cm = ref.cm + } + blobIDs = append(blobIDs, blobDesc.Digest.Hex()) + layers = append(layers, nydusify.Layer{ + Digest: blobDesc.Digest, + ReaderAt: ra, + }) + } + + // Merge all nydus bootstraps into a final nydus bootstrap. + pr, pw := io.Pipe() + go func() { + defer pw.Close() + if _, err := nydusify.Merge(ctx, layers, pw, nydusify.MergeOption{ + WithTar: true, + }); err != nil { + pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap")) + } + }() + + // Compress final nydus bootstrap to tar.gz and write into content store. 
+ cw, err := content.OpenWriter(ctx, cm.ContentStore, content.WithRef("nydus-merge-"+iref.getChainID().String())) + if err != nil { + return nil, errors.Wrap(err, "open content store writer") + } + defer cw.Close() + + gw := gzip.NewWriter(cw) + uncompressedDgst := digest.SHA256.Digester() + compressed := io.MultiWriter(gw, uncompressedDgst.Hash()) + if _, err := io.Copy(compressed, pr); err != nil { + return nil, errors.Wrapf(err, "copy bootstrap targz into content store") + } + if err := gw.Close(); err != nil { + return nil, errors.Wrap(err, "close gzip writer") + } + + compressedDgst := cw.Digest() + if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{ + containerdUncompressed: uncompressedDgst.Digest().String(), + })); err != nil { + if !errdefs.IsAlreadyExists(err) { + return nil, errors.Wrap(err, "commit to content store") + } + } + if err := cw.Close(); err != nil { + return nil, errors.Wrap(err, "close content store writer") + } + + info, err := cm.ContentStore.Info(ctx, compressedDgst) + if err != nil { + return nil, errors.Wrap(err, "get info from content store") + } + + blobIDsBytes, err := json.Marshal(blobIDs) + if err != nil { + return nil, errors.Wrap(err, "marshal blob ids") + } + + desc := ocispecs.Descriptor{ + Digest: compressedDgst, + Size: info.Size, + MediaType: ocispecs.MediaTypeImageLayerGzip, + Annotations: map[string]string{ + containerdUncompressed: uncompressedDgst.Digest().String(), + // Use this annotation to identify nydus bootstrap layer. + nydusify.LayerAnnotationNydusBootstrap: "true", + // Track all blob digests for nydus snapshotter. + nydusify.LayerAnnotationNydusBlobIDs: string(blobIDsBytes), + }, + } + + return &desc, nil +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go index a59523dd29..dcf424a6b4 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go @@ -11,13 +11,13 @@ import ( "strings" "sync" - "github.com/docker/docker/pkg/fileutils" iradix "github.com/hashicorp/go-immutable-radix" "github.com/hashicorp/golang-lru/simplelru" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/locker" + "github.com/moby/patternmatcher" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/tonistiigi/fsutil" @@ -79,8 +79,8 @@ type includedPath struct { path string record *CacheRecord included bool - includeMatchInfo fileutils.MatchInfo - excludeMatchInfo fileutils.MatchInfo + includeMatchInfo patternmatcher.MatchInfo + excludeMatchInfo patternmatcher.MatchInfo } type cacheManager struct { @@ -496,17 +496,17 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o endsInSep := len(p) != 0 && p[len(p)-1] == filepath.Separator p = keyPath(p) - var includePatternMatcher *fileutils.PatternMatcher + var includePatternMatcher *patternmatcher.PatternMatcher if len(opts.IncludePatterns) != 0 { - includePatternMatcher, err = fileutils.NewPatternMatcher(opts.IncludePatterns) + includePatternMatcher, err = patternmatcher.New(opts.IncludePatterns) if err != nil { return nil, errors.Wrapf(err, "invalid includepatterns: %s", opts.IncludePatterns) } } - var excludePatternMatcher *fileutils.PatternMatcher + var excludePatternMatcher *patternmatcher.PatternMatcher if len(opts.ExcludePatterns) != 0 { - excludePatternMatcher, err = 
fileutils.NewPatternMatcher(opts.ExcludePatterns) + excludePatternMatcher, err = patternmatcher.New(opts.ExcludePatterns) if err != nil { return nil, errors.Wrapf(err, "invalid excludepatterns: %s", opts.ExcludePatterns) } @@ -695,21 +695,21 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o func shouldIncludePath( candidate string, - includePatternMatcher *fileutils.PatternMatcher, - excludePatternMatcher *fileutils.PatternMatcher, + includePatternMatcher *patternmatcher.PatternMatcher, + excludePatternMatcher *patternmatcher.PatternMatcher, maybeIncludedPath *includedPath, parentDir *includedPath, ) (bool, error) { var ( m bool - matchInfo fileutils.MatchInfo + matchInfo patternmatcher.MatchInfo err error ) if includePatternMatcher != nil { if parentDir != nil { m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, parentDir.includeMatchInfo) } else { - m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, fileutils.MatchInfo{}) + m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, patternmatcher.MatchInfo{}) } if err != nil { return false, errors.Wrap(err, "failed to match includepatterns") @@ -724,7 +724,7 @@ func shouldIncludePath( if parentDir != nil { m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, parentDir.excludeMatchInfo) } else { - m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, fileutils.MatchInfo{}) + m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, patternmatcher.MatchInfo{}) } if err != nil { return false, errors.Wrap(err, "failed to match excludepatterns") @@ -799,7 +799,7 @@ func splitWildcards(p string) (d1, d2 string) { p2 = append(p2, p) } } - return filepath.Join(p1...), filepath.Join(p2...) + return path.Join(p1...), path.Join(p2...) 
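// Note on the filepath.Join -> path.Join changes in this hunk: cache keys and
// radix-tree paths are always "/"-separated (see the filepath.ToSlash calls),
// and on Windows filepath.Join would rebuild them with "\" separators.
// path.Join keeps key construction platform-independent, which is presumably
// the motivation for the swap; filepath.* remains in use for real on-disk
// paths, such as the Walk and Rel calls in the next hunk.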
} func containsWildcards(name string) bool { @@ -1015,7 +1015,7 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr Type: CacheRecordTypeSymlink, Linkname: filepath.ToSlash(link), } - k := []byte(filepath.Join("/", filepath.ToSlash(p))) + k := []byte(path.Join("/", filepath.ToSlash(p))) k = convertPathToKey(k) txn.Insert(k, cr) return nil @@ -1024,15 +1024,15 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr return err } - err = filepath.Walk(parentPath, func(path string, fi os.FileInfo, err error) error { + err = filepath.Walk(parentPath, func(itemPath string, fi os.FileInfo, err error) error { if err != nil { - return errors.Wrapf(err, "failed to walk %s", path) + return errors.Wrapf(err, "failed to walk %s", itemPath) } - rel, err := filepath.Rel(mp, path) + rel, err := filepath.Rel(mp, itemPath) if err != nil { return err } - k := []byte(filepath.Join("/", filepath.ToSlash(rel))) + k := []byte(path.Join("/", filepath.ToSlash(rel))) if string(k) == "/" { k = []byte{} } @@ -1043,7 +1043,7 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr } if fi.Mode()&os.ModeSymlink != 0 { cr.Type = CacheRecordTypeSymlink - link, err := os.Readlink(path) + link, err := os.Readlink(itemPath) if err != nil { return err } diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go index 0b5267101b..246f8f7f1c 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go @@ -51,6 +51,8 @@ func NewFromStat(stat *fstypes.Stat) (hash.Hash, error) { hdr.Name = "" // note: empty name is different from current hash in docker build. 
Name is added on recursive directory scan instead hdr.Devmajor = stat.Devmajor hdr.Devminor = stat.Devminor + hdr.Uid = int(stat.Uid) + hdr.Gid = int(stat.Gid) if len(stat.Xattrs) > 0 { hdr.PAXRecords = make(map[string]string, len(stat.Xattrs)) diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go index 182c461184..456e1ad7f1 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go @@ -37,10 +37,10 @@ func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { pax := h.PAXRecords - if len(h.Xattrs) > 0 { //nolint deprecated + if len(h.Xattrs) > 0 { //nolint:staticcheck // field deprecated in stdlib if pax == nil { pax = map[string]string{} - for k, v := range h.Xattrs { //nolint deprecated + for k, v := range h.Xattrs { //nolint:staticcheck // field deprecated in stdlib pax["SCHILY.xattr."+k] = v } } diff --git a/vendor/github.com/moby/buildkit/cache/converter.go b/vendor/github.com/moby/buildkit/cache/converter.go index a7e4df193a..f19412b708 100644 --- a/vendor/github.com/moby/buildkit/cache/converter.go +++ b/vendor/github.com/moby/buildkit/cache/converter.go @@ -7,120 +7,46 @@ import ( "io" "sync" - cdcompression "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/converter" "github.com/containerd/containerd/labels" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/iohelper" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -// needsConversion indicates whether a conversion is needed for the specified descriptor to -// be the compressionType. -func needsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (bool, error) { - mediaType := desc.MediaType - switch compressionType { - case compression.Uncompressed: - if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Uncompressed { - return false, nil - } - case compression.Gzip: - esgz, err := isEStargz(ctx, cs, desc.Digest) - if err != nil { - return false, err - } - if (!images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Gzip) && !esgz { - return false, nil - } - case compression.Zstd: - if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Zstd { - return false, nil - } - case compression.EStargz: - esgz, err := isEStargz(ctx, cs, desc.Digest) - if err != nil { - return false, err - } - if !images.IsLayerType(mediaType) || esgz { - return false, nil - } - default: - return false, fmt.Errorf("unknown compression type during conversion: %q", compressionType) - } - return true, nil -} - // getConverter returns converter function according to the specified compression type. // If no conversion is needed, this returns nil without error. 
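The getConverter rewrite below folds the old per-type switch into methods on compression.Type itself. Collecting everything this diff calls on it — MediaType, Compress, NeedsConversion, NeedsComputeDiffBySelf, plus Decompress on the value that compression.FromMediaType now returns — gives roughly the interface sketched here. It continues the earlier util/compression sketch (Config, Compressor, and Finalizer are the types sketched there) and is inferred from call sites, not copied from the package:

// Continuation of the compression sketch above. Inferred, not authoritative.
type Type interface {
	// MediaType returns the OCI layer media type this compression produces.
	MediaType() string
	// Compress returns a Compressor for this type plus an optional Finalizer.
	Compress(ctx context.Context, comp Config) (Compressor, Finalizer)
	// Decompress opens a decompressed stream over an existing blob.
	Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error)
	// NeedsConversion reports whether desc must be re-encoded to match this type.
	NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error)
	// NeedsComputeDiffBySelf reports whether the containerd differ cannot
	// produce this media type, so BuildKit must compute the diff itself.
	NeedsComputeDiffBySelf() bool
}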
func getConverter(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, comp compression.Config) (converter.ConvertFunc, error) { - if needs, err := needsConversion(ctx, cs, desc, comp.Type); err != nil { + if needs, err := comp.Type.NeedsConversion(ctx, cs, desc); err != nil { return nil, errors.Wrapf(err, "failed to determine conversion needs") } else if !needs { // No conversion. No need to return an error here. return nil, nil } + from, err := compression.FromMediaType(desc.MediaType) + if err != nil { + return nil, err + } + c := conversion{target: comp} - - from := compression.FromMediaType(desc.MediaType) - switch from { - case compression.Uncompressed: - case compression.Gzip, compression.Zstd: - c.decompress = func(ctx context.Context, desc ocispecs.Descriptor) (r io.ReadCloser, err error) { - ra, err := cs.ReaderAt(ctx, desc) - if err != nil { - return nil, err - } - esgz, err := isEStargz(ctx, cs, desc.Digest) - if err != nil { - return nil, err - } else if esgz { - r, err = decompressEStargz(io.NewSectionReader(ra, 0, ra.Size())) - if err != nil { - return nil, err - } - } else { - r, err = cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size())) - if err != nil { - return nil, err - } - } - return &readCloser{r, ra.Close}, nil - } - default: - return nil, errors.Errorf("unsupported source compression type %q from mediatype %q", from, desc.MediaType) - } - - switch comp.Type { - case compression.Uncompressed: - case compression.Gzip: - c.compress = gzipWriter(comp) - case compression.Zstd: - c.compress = zstdWriter(comp) - case compression.EStargz: - compressorFunc, finalize := compressEStargz(comp) - c.compress = func(w io.Writer) (io.WriteCloser, error) { - return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip) - } - c.finalize = finalize - default: - return nil, errors.Errorf("unknown target compression type during conversion: %q", comp.Type) - } + c.compress, c.finalize = comp.Type.Compress(ctx, comp) + c.decompress = from.Decompress return (&c).convert, nil } type conversion struct { target compression.Config - decompress func(context.Context, ocispecs.Descriptor) (io.ReadCloser, error) - compress func(w io.Writer) (io.WriteCloser, error) - finalize func(context.Context, content.Store) (map[string]string, error) + decompress compression.Decompressor + compress compression.Compressor + finalize compression.Finalizer } var bufioPool = sync.Pool{ @@ -151,34 +77,20 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec bufW = bufio.NewWriterSize(w, 128*1024) } defer bufioPool.Put(bufW) - var zw io.WriteCloser = &nopWriteCloser{bufW} - if c.compress != nil { - zw, err = c.compress(zw) - if err != nil { - return nil, err - } + zw, err := c.compress(&iohelper.NopWriteCloser{Writer: bufW}, c.target.Type.MediaType()) + if err != nil { + return nil, err } zw = &onceWriteCloser{WriteCloser: zw} defer zw.Close() // convert this layer diffID := digest.Canonical.Digester() - var rdr io.Reader - if c.decompress == nil { - ra, err := cs.ReaderAt(ctx, desc) - if err != nil { - return nil, err - } - defer ra.Close() - rdr = io.NewSectionReader(ra, 0, ra.Size()) - } else { - rc, err := c.decompress(ctx, desc) - if err != nil { - return nil, err - } - defer rc.Close() - rdr = rc + rdr, err := c.decompress(ctx, cs, desc) + if err != nil { + return nil, err } + defer rdr.Close() if _, err := io.Copy(zw, io.TeeReader(rdr, diffID.Hash())); err != nil { return nil, err } @@ -201,7 +113,7 @@ func (c *conversion) convert(ctx context.Context, 
cs content.Store, desc ocispec } newDesc := desc - newDesc.MediaType = c.target.Type.DefaultMediaType() + newDesc.MediaType = c.target.Type.MediaType() newDesc.Digest = info.Digest newDesc.Size = info.Size newDesc.Annotations = map[string]string{labels.LabelUncompressed: diffID.Digest().String()} @@ -217,28 +129,6 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec return &newDesc, nil } -type readCloser struct { - io.ReadCloser - closeFunc func() error -} - -func (rc *readCloser) Close() error { - err1 := rc.ReadCloser.Close() - err2 := rc.closeFunc() - if err1 != nil { - return errors.Wrapf(err1, "failed to close: %v", err2) - } - return err2 -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { - return nil -} - type onceWriteCloser struct { io.WriteCloser closeOnce sync.Once diff --git a/vendor/github.com/moby/buildkit/cache/filelist.go b/vendor/github.com/moby/buildkit/cache/filelist.go new file mode 100644 index 0000000000..c2c7921fd5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/filelist.go @@ -0,0 +1,90 @@ +package cache + +import ( + "archive/tar" + "context" + "encoding/json" + "fmt" + "io" + "path" + "sort" + + cdcompression "github.com/containerd/containerd/archive/compression" + "github.com/moby/buildkit/session" +) + +const keyFileList = "filelist" + +// FileList returns an ordered list of files present in the cache record that were +// changed compared to the parent. The paths of the files are in same format as they +// are in the tar stream (AUFS whiteout format). If the reference does not have a +// blob associated with it, the list is empty. +func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string, error) { + res, err := g.Do(ctx, fmt.Sprintf("filelist-%s", sr.ID()), func(ctx context.Context) (interface{}, error) { + dt, err := sr.GetExternal(keyFileList) + if err == nil && dt != nil { + var files []string + if err := json.Unmarshal(dt, &files); err != nil { + return nil, err + } + return files, nil + } + + if sr.getBlob() == "" { + return nil, nil + } + + // lazy blobs need to be pulled first + if err := sr.Extract(ctx, s); err != nil { + return nil, err + } + + desc, err := sr.ociDesc(ctx, sr.descHandlers, false) + if err != nil { + return nil, err + } + + ra, err := sr.cm.ContentStore.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + + r, err := cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size())) + if err != nil { + return nil, err + } + defer r.Close() + + var files []string + + rdr := tar.NewReader(r) + for { + hdr, err := rdr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + name := path.Clean(hdr.Name) + files = append(files, name) + } + sort.Strings(files) + + dt, err = json.Marshal(files) + if err != nil { + return nil, err + } + if err := sr.SetExternal(keyFileList, dt); err != nil { + return nil, err + } + return files, nil + }) + if err != nil { + return nil, err + } + if res == nil { + return nil, nil + } + return res.([]string), nil +} diff --git a/vendor/github.com/moby/buildkit/cache/manager.go b/vendor/github.com/moby/buildkit/cache/manager.go index 58e28b4743..983f7cd529 100644 --- a/vendor/github.com/moby/buildkit/cache/manager.go +++ b/vendor/github.com/moby/buildkit/cache/manager.go @@ -301,7 +301,14 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor, cm.records[id] = rec - return rec.ref(true, descHandlers, nil), nil + ref := rec.ref(true,
descHandlers, nil) + if s := unlazySessionOf(opts...); s != nil { + if err := ref.unlazy(ctx, ref.descHandlers, ref.progress, s, true); err != nil { + return nil, err + } + } + + return ref, nil } // init loads all snapshots from metadata state and tries to load the records diff --git a/vendor/github.com/moby/buildkit/cache/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata.go index 121110bd13..d6410fd554 100644 --- a/vendor/github.com/moby/buildkit/cache/metadata.go +++ b/vendor/github.com/moby/buildkit/cache/metadata.go @@ -551,9 +551,7 @@ func (md *cacheMetadata) appendStringSlice(key string, values ...string) error { } for _, existing := range slice { - if _, ok := idx[existing]; ok { - delete(idx, existing) - } + delete(idx, existing) } if len(idx) == 0 { diff --git a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go index ae957c3e72..170c0a8872 100644 --- a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go +++ b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go @@ -317,6 +317,9 @@ func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) { func (s *StorageItem) Commit() error { s.qmu.Lock() defer s.qmu.Unlock() + if len(s.queue) == 0 { + return nil + } return errors.WithStack(s.Update(func(b *bolt.Bucket) error { for _, fn := range s.queue { if err := fn(b); err != nil { diff --git a/vendor/github.com/moby/buildkit/cache/opts.go b/vendor/github.com/moby/buildkit/cache/opts.go index 92df9989d9..1f1db6ca61 100644 --- a/vendor/github.com/moby/buildkit/cache/opts.go +++ b/vendor/github.com/moby/buildkit/cache/opts.go @@ -36,4 +36,13 @@ func (m NeedsRemoteProviderError) Error() string { return fmt.Sprintf("missing descriptor handlers for lazy blobs %+v", []digest.Digest(m)) } -type ProgressKey struct{} +type Unlazy session.Group + +func unlazySessionOf(opts ...RefOption) session.Group { + for _, opt := range opts { + if opt, ok := opt.(session.Group); ok { + return opt + } + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/cache/refs.go b/vendor/github.com/moby/buildkit/cache/refs.go index 0eee8fd47a..dc2cd561b0 100644 --- a/vendor/github.com/moby/buildkit/cache/refs.go +++ b/vendor/github.com/moby/buildkit/cache/refs.go @@ -3,7 +3,6 @@ package cache import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -37,6 +36,8 @@ import ( "golang.org/x/sync/errgroup" ) +var additionalAnnotations = append(compression.EStargzAnnotations, containerdUncompressed) + // Ref is a reference to cacheable objects. 
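The unlazySessionOf helper above shows the pattern GetByBlob now relies on: a variadic, heterogeneous option slice scanned by type assertion until an element implements session.Group. A minimal runnable sketch of the same pattern; RefOption, Group, and namedGroup are illustrative names, not the buildkit types:

```go
package main

import "fmt"

// RefOption and Group stand in for cache.RefOption and session.Group; the
// point is the shape of unlazySessionOf: scan an untyped option slice and
// type-assert for a capability.
type RefOption interface{}

type Group interface{ SessionID() string }

type namedGroup string

func (g namedGroup) SessionID() string { return string(g) }

func sessionOf(opts ...RefOption) Group {
	for _, opt := range opts {
		if g, ok := opt.(Group); ok {
			return g
		}
	}
	return nil
}

func main() {
	// unrelated options are skipped; the first Group found wins
	if g := sessionOf("description", 42, namedGroup("build-session")); g != nil {
		fmt.Println(g.SessionID()) // build-session
	}
}
```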
type Ref interface { Mountable @@ -56,6 +57,7 @@ type ImmutableRef interface { Extract(ctx context.Context, s session.Group) error // +progress GetRemotes(ctx context.Context, createIfNeeded bool, cfg config.RefConfig, all bool, s session.Group) ([]*solver.Remote, error) LayerChain() RefList + FileList(ctx context.Context, s session.Group) ([]string, error) } type MutableRef interface { @@ -768,12 +770,9 @@ func (sr *immutableRef) getBlobWithCompression(ctx context.Context, compressionT } func getBlobWithCompression(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (ocispecs.Descriptor, error) { - if compressionType == compression.UnknownCompression { - return ocispecs.Descriptor{}, fmt.Errorf("cannot get unknown compression type") - } var target *ocispecs.Descriptor if err := walkBlob(ctx, cs, desc, func(desc ocispecs.Descriptor) bool { - if needs, err := needsConversion(ctx, cs, desc, compressionType); err == nil && !needs { + if needs, err := compressionType.NeedsConversion(ctx, cs, desc); err == nil && !needs { target = &desc return false } @@ -838,11 +837,11 @@ func getBlobDesc(ctx context.Context, cs content.Store, dgst digest.Digest) (oci return ocispecs.Descriptor{}, err } if info.Labels == nil { - return ocispecs.Descriptor{}, fmt.Errorf("no blob metadata is stored for %q", info.Digest) + return ocispecs.Descriptor{}, errors.Errorf("no blob metadata is stored for %q", info.Digest) } mt, ok := info.Labels[blobMediaTypeLabel] if !ok { - return ocispecs.Descriptor{}, fmt.Errorf("no media type is stored for %q", info.Digest) + return ocispecs.Descriptor{}, errors.Errorf("no media type is stored for %q", info.Digest) } desc := ocispecs.Descriptor{ Digest: info.Digest, @@ -882,7 +881,7 @@ func filterAnnotationsForSave(a map[string]string) (b map[string]string) { if a == nil { return nil } - for _, k := range append(eStargzAnnotations, containerdUncompressed) { + for _, k := range additionalAnnotations { v, ok := a[k] if !ok { continue @@ -1552,12 +1551,12 @@ func readonlyOverlay(opt []string) []string { func newSharableMountPool(tmpdirRoot string) (sharableMountPool, error) { if tmpdirRoot != "" { if err := os.MkdirAll(tmpdirRoot, 0700); err != nil { - return sharableMountPool{}, fmt.Errorf("failed to prepare mount pool: %w", err) + return sharableMountPool{}, errors.Wrap(err, "failed to prepare mount pool") } // If tmpdirRoot is specified, remove existing mounts to avoid conflict. files, err := os.ReadDir(tmpdirRoot) if err != nil { - return sharableMountPool{}, fmt.Errorf("failed to read mount pool: %w", err) + return sharableMountPool{}, errors.Wrap(err, "failed to read mount pool") } for _, file := range files { if file.IsDir() { @@ -1591,9 +1590,10 @@ func (p sharableMountPool) setSharable(mounts snapshot.Mountable) snapshot.Mount // This is useful to share writable overlayfs mounts. // // NOTE: Mount() method doesn't return the underlying mount configuration (e.g. overlayfs mounts) -// instead it always return bind mounts of the temporary mount point. So if the caller -// needs to inspect the underlying mount configuration (e.g. for optimized differ for -// overlayfs), this wrapper shouldn't be used. +// +// instead it always return bind mounts of the temporary mount point. So if the caller +// needs to inspect the underlying mount configuration (e.g. for optimized differ for +// overlayfs), this wrapper shouldn't be used. 
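One thing worth flagging in the refs.go hunk above: additionalAnnotations is built with append on compression.EStargzAnnotations, a slice owned by another package. Whether that slice has spare capacity is not visible in this diff, so this is a general caution rather than a bug report: when the source slice does have capacity to spare, append aliases its backing array. A small demonstration of the hazard and the copy-first form:

```go
package main

import "fmt"

func main() {
	// a slice as another package might export it, with spare capacity
	base := make([]string, 2, 3)
	base[0], base[1] = "annotation.a", "annotation.b"

	alias := append(base, "extra")                        // fits in cap: shares base's array
	safe := append(append([]string{}, base...), "extra")  // copy first: independent storage

	alias[0] = "mutated"
	fmt.Println(base[0]) // "mutated": the plain append aliased base
	fmt.Println(safe[0]) // "annotation.a": the copying form did not
}
```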
type sharableMountable struct { snapshot.Mountable @@ -1631,7 +1631,7 @@ func (sm *sharableMountable) Mount() (_ []mount.Mount, _ func() error, retErr er // Don't need temporary mount wrapper for non-overlayfs mounts return mounts, release, nil } - dir, err := ioutil.TempDir(sm.mountPoolRoot, "buildkit") + dir, err := os.MkdirTemp(sm.mountPoolRoot, "buildkit") if err != nil { return nil, nil, err } diff --git a/vendor/github.com/moby/buildkit/cache/remote.go b/vendor/github.com/moby/buildkit/cache/remote.go index d0ac594b6a..843ad24970 100644 --- a/vendor/github.com/moby/buildkit/cache/remote.go +++ b/vendor/github.com/moby/buildkit/cache/remote.go @@ -212,8 +212,8 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refC } } - if refCfg.Compression.Force { - if needs, err := needsConversion(ctx, sr.cm.ContentStore, desc, refCfg.Compression.Type); err != nil { + if needsForceCompression(ctx, sr.cm.ContentStore, desc, refCfg) { + if needs, err := refCfg.Compression.Type.NeedsConversion(ctx, sr.cm.ContentStore, desc); err != nil { return nil, err } else if needs { // ensure the compression type. diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/export.go b/vendor/github.com/moby/buildkit/cache/remotecache/export.go index 1c3a240cfc..a0fd7ba7e2 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/export.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/export.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "fmt" - "time" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" @@ -24,24 +23,10 @@ import ( type ResolveCacheExporterFunc func(ctx context.Context, g session.Group, attrs map[string]string) (Exporter, error) -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} - type Exporter interface { solver.CacheExporterTarget + // Name uniquely identifies the exporter + Name() string // Finalize finalizes and return metadata that are returned to the client // e.g. 
ExporterResponseManifestDesc Finalize(ctx context.Context) (map[string]string, error) @@ -72,6 +57,10 @@ func NewExporter(ingester content.Ingester, ref string, oci bool, compressionCon return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, ref: ref, comp: compressionConfig} } +func (ce *contentCacheExporter) Name() string { + return "exporting content cache" +} + func (ce *contentCacheExporter) Config() Config { return Config{ Compression: ce.comp, @@ -107,7 +96,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string if !ok { return nil, errors.Errorf("missing blob %s", l.Blob) } - layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob)) + layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob)) if err := contentutil.Copy(ctx, ce.ingester, dgstPair.Provider, dgstPair.Descriptor, ce.ref, logs.LoggerFromContext(ctx)); err != nil { return nil, layerDone(errors.Wrap(err, "error writing layer blob")) } @@ -127,7 +116,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string Size: int64(len(dt)), MediaType: v1.CacheConfigMediaTypeV0, } - configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst)) + configDone := progress.OneOff(ctx, fmt.Sprintf("writing config %s", dgst)) if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { return nil, configDone(errors.Wrap(err, "error writing config blob")) } @@ -146,7 +135,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string Size: int64(len(dt)), MediaType: mfst.MediaType, } - mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst)) + mfstDone := progress.OneOff(ctx, fmt.Sprintf("writing manifest %s", dgst)) if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { return nil, mfstDone(errors.Wrap(err, "error writing manifest blob")) } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go index cf11db4959..59631a9d54 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go @@ -30,6 +30,10 @@ type exporter struct { chains *v1.CacheChains } +func (*exporter) Name() string { + return "exporting inline cache" +} + func (ce *exporter) Config() remotecache.Config { return remotecache.Config{ Compression: compression.New(compression.Default), diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go index 18c73364c0..7f3d83b70f 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go @@ -98,15 +98,28 @@ func getContentStore(ctx context.Context, sm *session.Manager, g session.Group, if err != nil { return nil, err } - return sessioncontent.NewCallerStore(caller, storeID), nil + return &unlazyProvider{sessioncontent.NewCallerStore(caller, storeID), g}, nil +} + +type unlazyProvider struct { + content.Store + s session.Group +} + +func (p *unlazyProvider) UnlazySession(desc ocispecs.Descriptor) session.Group { + return p.s } func attrsToCompression(attrs map[string]string) (*compression.Config, error) { - compressionType := compression.Default + var compressionType compression.Type if v, ok := 
attrs[attrLayerCompression]; ok { - if c := compression.Parse(v); c != compression.UnknownCompression { - compressionType = c + c, err := compression.Parse(v) + if err != nil { + return nil, err } + compressionType = c + } else { + compressionType = compression.Default } compressionConfig := compression.New(compressionType) if v, ok := attrs[attrForceCompression]; ok { diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go index cfe54e52aa..e3b32eb296 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go @@ -131,11 +131,15 @@ func (dsl *withDistributionSourceLabel) SnapshotLabels(descs []ocispecs.Descript } func attrsToCompression(attrs map[string]string) (*compression.Config, error) { - compressionType := compression.Default + var compressionType compression.Type if v, ok := attrs[attrLayerCompression]; ok { - if c := compression.Parse(v); c != compression.UnknownCompression { - compressionType = c + c, err := compression.Parse(v) + if err != nil { + return nil, err } + compressionType = c + } else { + compressionType = compression.Default } compressionConfig := compression.New(compressionType) if v, ok := attrs[attrForceCompression]; ok { diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go index 7ba7eb0f60..a4f7f6ad05 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go @@ -276,7 +276,7 @@ func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheR // Any of blobs in the remote must meet the specified compression option. 
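Both remote cache backends (local and registry, above) pick up the same attrsToCompression change: compression.Parse now returns an error, so an unrecognized value fails the request instead of silently degrading to the default, while an unset attribute still defaults. A minimal sketch of that pattern with illustrative names (Type, Parse, compressionFromAttrs are stand-ins, not the buildkit API):

```go
package main

import (
	"fmt"
	"strings"
)

// Type and Parse mimic the shape of compression.Type and compression.Parse
// after this change: unknown values are an error, not the default.
type Type string

const (
	Gzip    Type = "gzip"
	Zstd    Type = "zstd"
	Default      = Gzip
)

func Parse(v string) (Type, error) {
	switch t := Type(strings.ToLower(v)); t {
	case Gzip, Zstd:
		return t, nil
	default:
		return "", fmt.Errorf("unsupported compression type %q", v)
	}
}

func compressionFromAttrs(attrs map[string]string) (Type, error) {
	v, ok := attrs["compression"]
	if !ok {
		return Default, nil // an unset attribute still defaults
	}
	return Parse(v) // a bad value now fails instead of degrading silently
}

func main() {
	fmt.Println(compressionFromAttrs(map[string]string{"compression": "zstd"}))
	fmt.Println(compressionFromAttrs(map[string]string{"compression": "zip"}))
}
```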
match := false for _, desc := range r.result.Descriptors { - m := compressionopts.Type.IsMediaType(desc.MediaType) + m := compression.IsMediaType(compressionopts.Type, desc.MediaType) match = match || m if compressionopts.Force && !m { match = false diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go index 306e037f7f..8c8bbde5dc 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go @@ -146,7 +146,7 @@ func (c *item) removeLink(src *item) bool { return found } -func (c *item) AddResult(createdAt time.Time, result *solver.Remote) { +func (c *item) AddResult(_ digest.Digest, _ int, createdAt time.Time, result *solver.Remote) { c.resultTime = createdAt c.result = result } @@ -214,7 +214,7 @@ func (c *item) walkAllResults(fn func(i *item) error, visited map[*item]struct{} type nopRecord struct { } -func (c *nopRecord) AddResult(createdAt time.Time, result *solver.Remote) { +func (c *nopRecord) AddResult(_ digest.Digest, _ int, createdAt time.Time, result *solver.Remote) { } func (c *nopRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) { diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go index 97d21a4520..a1b00d86f6 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go @@ -1,6 +1,6 @@ package cacheimport -// Distibutable build cache +// Distributable build cache // // Main manifest is OCI image index // https://github.com/opencontainers/image-spec/blob/master/image-index.md . @@ -13,7 +13,7 @@ package cacheimport // Cache config file layout: // //{ -// "layers": [ +// "layers": [ <- layers contains references to blobs // { // "blob": "sha256:deadbeef", <- digest of layer blob in index // "parent": -1 <- index of parent layer, -1 if no parent @@ -24,20 +24,26 @@ package cacheimport // } // ], // -// "records": [ +// "records": [ <- records contains chains of cache keys // { // "digest": "sha256:deadbeef", <- base digest for the record // }, // { // "digest": "sha256:deadbeef", // "output": 1, <- optional output index -// "layers": [ <- optional array or layer chains +// "layers": [ <- optional array of layer pointers // { // "createdat": "", -// "layer": 1, <- index to the layer +// "layer": 1, <- index to the layers array, layer is loaded with all of its parents // } // ], -// "inputs": [ <- dependant records +// "chains": [ <- optional array of layer pointer lists +// { +// "createdat": "", +// "layers": [1], <- indexes to the layers array, all layers are loaded in specified order without parents +// } +// ], +// "inputs": [ <- dependant records, this is how cache keys are linked together // [ <- index of the dependency (0) // { // "selector": "sel", <- optional selector diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go index 65a6e441f5..3c8294a602 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go @@ -61,7 +61,7 @@ func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver. 
return nil, err } if remote != nil { - r.AddResult(res.CreatedAt, remote) + r.AddResult("", 0, res.CreatedAt, remote) } } @@ -86,7 +86,7 @@ func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver. } if remote != nil { remote.Provider = mp - r.AddResult(res.CreatedAt, remote) + r.AddResult("", 0, res.CreatedAt, remote) } } diff --git a/vendor/github.com/moby/buildkit/cache/util/fsutil.go b/vendor/github.com/moby/buildkit/cache/util/fsutil.go index b425a002a5..e90ed45f77 100644 --- a/vendor/github.com/moby/buildkit/cache/util/fsutil.go +++ b/vendor/github.com/moby/buildkit/cache/util/fsutil.go @@ -3,7 +3,6 @@ package util import ( "context" "io" - "io/ioutil" "os" "path/filepath" @@ -59,7 +58,7 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([ } if req.Range == nil { - dt, err = ioutil.ReadFile(fp) + dt, err = os.ReadFile(fp) if err != nil { return errors.WithStack(err) } @@ -68,7 +67,7 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([ if err != nil { return errors.WithStack(err) } - dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length))) + dt, err = io.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length))) f.Close() if err != nil { return errors.WithStack(err) diff --git a/vendor/github.com/moby/buildkit/client/build.go b/vendor/github.com/moby/buildkit/client/build.go index 25b3aa6d7c..2a4bc9e105 100644 --- a/vendor/github.com/moby/buildkit/client/build.go +++ b/vendor/github.com/moby/buildkit/client/build.go @@ -20,17 +20,14 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF } }() - if opt.Frontend != "" { - return nil, errors.New("invalid SolveOpt, Build interface cannot use Frontend") - } + feOpts := opt.FrontendAttrs + + opt.Frontend = "" if product == "" { product = apicaps.ExportedProduct } - feOpts := opt.FrontendAttrs - opt.FrontendAttrs = nil - workers, err := c.ListWorkers(ctx) if err != nil { return nil, errors.Wrap(err, "listing workers for Build") @@ -113,6 +110,19 @@ func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.Sta return g.gateway.StatFile(ctx, in, opts...) } +func (g *gatewayClientForBuild) Evaluate(ctx context.Context, in *gatewayapi.EvaluateRequest, opts ...grpc.CallOption) (*gatewayapi.EvaluateResponse, error) { + if err := g.caps.Supports(gatewayapi.CapGatewayEvaluate); err != nil { + if err2 := g.caps.Supports(gatewayapi.CapStatFile); err2 != nil { + return nil, err + } + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + _, err := g.gateway.StatFile(ctx, &gatewayapi.StatFileRequest{Ref: in.Ref, Path: "."}, opts...) + return &gatewayapi.EvaluateResponse{}, err + } + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.Evaluate(ctx, in, opts...) +} + func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) { ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.Ping(ctx, in, opts...) 
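The new gatewayClientForBuild.Evaluate above degrades gracefully: when the daemon predates CapGatewayEvaluate, it falls back to StatFile on "." for the ref, which forces the result to be solved as a side effect. The same probe-then-fallback shape as a self-contained sketch; capSet, evaluate, and the capability names here are illustrative:

```go
package main

import (
	"context"
	"fmt"
)

// capSet stands in for the apicaps capability set consulted by Evaluate.
type capSet map[string]bool

func (c capSet) Supports(id string) error {
	if !c[id] {
		return fmt.Errorf("capability %s unsupported by daemon", id)
	}
	return nil
}

// evaluate prefers the dedicated RPC; on older daemons it falls back to a
// cheaper call whose side effect (forcing the ref to be solved) is equivalent.
func evaluate(ctx context.Context, caps capSet, evaluateRPC, statProbe func(context.Context) error) error {
	if err := caps.Supports("gateway.evaluate"); err != nil {
		if err2 := caps.Supports("gateway.statfile"); err2 != nil {
			return err // neither path available: surface the original error
		}
		return statProbe(ctx) // e.g. a stat of "." purely for its side effect
	}
	return evaluateRPC(ctx)
}

func main() {
	ok := func(context.Context) error { return nil }
	// old daemon: no Evaluate, but StatFile exists, so the fallback runs
	fmt.Println(evaluate(context.Background(), capSet{"gateway.statfile": true}, ok, ok))
}
```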
diff --git a/vendor/github.com/moby/buildkit/client/client.go b/vendor/github.com/moby/buildkit/client/client.go index 8c9259a4a9..deac2507a9 100644 --- a/vendor/github.com/moby/buildkit/client/client.go +++ b/vendor/github.com/moby/buildkit/client/client.go @@ -4,11 +4,12 @@ import ( "context" "crypto/tls" "crypto/x509" - "io/ioutil" "net" "net/url" + "os" "strings" + contentapi "github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/containerd/defaults" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" controlapi "github.com/moby/buildkit/api/services/control" @@ -168,12 +169,16 @@ func (c *Client) setupDelegatedTracing(ctx context.Context, td TracerDelegate) e return td.SetSpanExporter(ctx, e) } -func (c *Client) controlClient() controlapi.ControlClient { +func (c *Client) ControlClient() controlapi.ControlClient { return controlapi.NewControlClient(c.conn) } +func (c *Client) ContentClient() contentapi.ContentClient { + return contentapi.NewContentClient(c.conn) +} + func (c *Client) Dialer() session.Dialer { - return grpchijack.Dialer(c.controlClient()) + return grpchijack.Dialer(c.ControlClient()) } func (c *Client) Close() error { @@ -212,7 +217,7 @@ func WithCredentials(serverName, ca, cert, key string) ClientOpt { } func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { - ca, err := ioutil.ReadFile(opts.CACert) + ca, err := os.ReadFile(opts.CACert) if err != nil { return nil, errors.Wrap(err, "could not read ca certificate") } @@ -234,7 +239,6 @@ func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { return nil, errors.Wrap(err, "could not read certificate/key") } cfg.Certificates = []tls.Certificate{cert} - cfg.BuildNameToCertificate() } return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil diff --git a/vendor/github.com/moby/buildkit/client/diskusage.go b/vendor/github.com/moby/buildkit/client/diskusage.go index 2a2373f9d3..0918c7dcd4 100644 --- a/vendor/github.com/moby/buildkit/client/diskusage.go +++ b/vendor/github.com/moby/buildkit/client/diskusage.go @@ -10,18 +10,18 @@ import ( ) type UsageInfo struct { - ID string - Mutable bool - InUse bool - Size int64 + ID string `json:"id"` + Mutable bool `json:"mutable"` + InUse bool `json:"inUse"` + Size int64 `json:"size"` - CreatedAt time.Time - LastUsedAt *time.Time - UsageCount int - Parents []string - Description string - RecordType UsageRecordType - Shared bool + CreatedAt time.Time `json:"createdAt"` + LastUsedAt *time.Time `json:"lastUsedAt"` + UsageCount int `json:"usageCount"` + Parents []string `json:"parents"` + Description string `json:"description"` + RecordType UsageRecordType `json:"recordType"` + Shared bool `json:"shared"` } func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) { @@ -31,7 +31,7 @@ func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*Usa } req := &controlapi.DiskUsageRequest{Filter: info.Filter} - resp, err := c.controlClient().DiskUsage(ctx, req) + resp, err := c.ControlClient().DiskUsage(ctx, req) if err != nil { return nil, errors.Wrap(err, "failed to call diskusage") } diff --git a/vendor/github.com/moby/buildkit/client/info.go b/vendor/github.com/moby/buildkit/client/info.go new file mode 100644 index 0000000000..d5bdbcec89 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/info.go @@ -0,0 +1,40 @@ +package client + +import ( + "context" + + controlapi "github.com/moby/buildkit/api/services/control" + apitypes 
"github.com/moby/buildkit/api/types" + "github.com/pkg/errors" +) + +type Info struct { + BuildkitVersion BuildkitVersion `json:"buildkitVersion"` +} + +type BuildkitVersion struct { + Package string `json:"package"` + Version string `json:"version"` + Revision string `json:"revision"` +} + +func (c *Client) Info(ctx context.Context) (*Info, error) { + res, err := c.ControlClient().Info(ctx, &controlapi.InfoRequest{}) + if err != nil { + return nil, errors.Wrap(err, "failed to call info") + } + return &Info{ + BuildkitVersion: fromAPIBuildkitVersion(res.BuildkitVersion), + }, nil +} + +func fromAPIBuildkitVersion(in *apitypes.BuildkitVersion) BuildkitVersion { + if in == nil { + return BuildkitVersion{} + } + return BuildkitVersion{ + Package: in.Package, + Version: in.Version, + Revision: in.Revision, + } +} diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go index 994804a139..2b1d9bd3f1 100644 --- a/vendor/github.com/moby/buildkit/client/llb/exec.go +++ b/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -192,12 +192,13 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } meta := &pb.Meta{ - Args: args, - Env: env.ToArray(), - Cwd: cwd, - User: user, - Hostname: hostname, - CgroupParent: cgrpParent, + Args: args, + Env: env.ToArray(), + Cwd: cwd, + User: user, + Hostname: hostname, + CgroupParent: cgrpParent, + RemoveMountStubsRecursive: true, } extraHosts, err := getExtraHosts(e.base)(ctx, c) diff --git a/vendor/github.com/moby/buildkit/client/llb/marshal.go b/vendor/github.com/moby/buildkit/client/llb/marshal.go index e59e560ee9..3b02299e43 100644 --- a/vendor/github.com/moby/buildkit/client/llb/marshal.go +++ b/vendor/github.com/moby/buildkit/client/llb/marshal.go @@ -2,7 +2,6 @@ package llb import ( "io" - "io/ioutil" "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/solver/pb" @@ -67,7 +66,7 @@ func WriteTo(def *Definition, w io.Writer) error { } func ReadFrom(r io.Reader) (*Definition, error) { - b, err := ioutil.ReadAll(r) + b, err := io.ReadAll(r) if err != nil { return nil, err } @@ -88,10 +87,7 @@ func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) { c.Platform = p } - for _, wc := range override.WorkerConstraints { - c.WorkerConstraints = append(c.WorkerConstraints, wc) - } - + c.WorkerConstraints = append(c.WorkerConstraints, override.WorkerConstraints...) 
c.Metadata = mergeMetadata(c.Metadata, override.Metadata) if c.Platform == nil { diff --git a/vendor/github.com/moby/buildkit/client/llb/resolver.go b/vendor/github.com/moby/buildkit/client/llb/resolver.go index af1edc1071..b3b9cdf751 100644 --- a/vendor/github.com/moby/buildkit/client/llb/resolver.go +++ b/vendor/github.com/moby/buildkit/client/llb/resolver.go @@ -23,13 +23,35 @@ func ResolveDigest(v bool) ImageOption { }) } +func WithLayerLimit(l int) ImageOption { + return imageOptionFunc(func(ii *ImageInfo) { + ii.layerLimit = &l + }) +} + // ImageMetaResolver can resolve image config metadata from a reference type ImageMetaResolver interface { ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error) } +type ResolverType int + +const ( + ResolverTypeRegistry ResolverType = iota + ResolverTypeOCILayout +) + type ResolveImageConfigOpt struct { + ResolverType + Platform *ocispecs.Platform ResolveMode string LogName string + + Store ResolveImageConfigOptStore +} + +type ResolveImageConfigOptStore struct { + SessionID string + StoreID string } diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go index c1be90b704..27c8c1b617 100644 --- a/vendor/github.com/moby/buildkit/client/llb/source.go +++ b/vendor/github.com/moby/buildkit/client/llb/source.go @@ -116,6 +116,11 @@ func Image(ref string, opts ...ImageOption) State { attrs[pb.AttrImageRecordType] = info.RecordType } + if ll := info.layerLimit; ll != nil { + attrs[pb.AttrImageLayerLimit] = strconv.FormatInt(int64(*ll), 10) + addCap(&info.Constraints, pb.CapSourceImageLayerLimit) + } + src := NewSource("docker-image://"+ref, attrs, info.Constraints) // controversial if err != nil { src.err = err @@ -127,8 +132,9 @@ func Image(ref string, opts ...ImageOption) State { p = c.Platform } _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{ - Platform: p, - ResolveMode: info.resolveMode.String(), + Platform: p, + ResolveMode: info.resolveMode.String(), + ResolverType: ResolverTypeRegistry, }) if err != nil { return State{}, err @@ -142,8 +148,9 @@ func Image(ref string, opts ...ImageOption) State { p = c.Platform } dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{ - Platform: p, - ResolveMode: info.resolveMode.String(), + Platform: p, + ResolveMode: info.resolveMode.String(), + ResolverType: ResolverTypeRegistry, }) if err != nil { return State{}, err @@ -204,6 +211,7 @@ type ImageInfo struct { metaResolver ImageMetaResolver resolveDigest bool resolveMode ResolveMode + layerLimit *int RecordType string } @@ -446,6 +454,59 @@ func Differ(t DiffType, required bool) LocalOption { }) } +func OCILayout(ref string, opts ...OCILayoutOption) State { + gi := &OCILayoutInfo{} + + for _, o := range opts { + o.SetOCILayoutOption(gi) + } + attrs := map[string]string{} + if gi.sessionID != "" { + attrs[pb.AttrOCILayoutSessionID] = gi.sessionID + } + if gi.storeID != "" { + attrs[pb.AttrOCILayoutStoreID] = gi.storeID + } + if gi.layerLimit != nil { + attrs[pb.AttrOCILayoutLayerLimit] = strconv.FormatInt(int64(*gi.layerLimit), 10) + } + + addCap(&gi.Constraints, pb.CapSourceOCILayout) + + source := NewSource("oci-layout://"+ref, attrs, gi.Constraints) + return NewState(source.Output()) +} + +type OCILayoutOption interface { + SetOCILayoutOption(*OCILayoutInfo) +} + +type ociLayoutOptionFunc func(*OCILayoutInfo) + +func (fn ociLayoutOptionFunc) SetOCILayoutOption(li 
*OCILayoutInfo) { + fn(li) +} + +func OCIStore(sessionID string, storeID string) OCILayoutOption { + return ociLayoutOptionFunc(func(oi *OCILayoutInfo) { + oi.sessionID = sessionID + oi.storeID = storeID + }) +} + +func OCILayerLimit(limit int) OCILayoutOption { + return ociLayoutOptionFunc(func(oi *OCILayoutInfo) { + oi.layerLimit = &limit + }) +} + +type OCILayoutInfo struct { + constraintsWrapper + sessionID string + storeID string + layerLimit *int +} + type DiffType string const ( @@ -549,7 +610,7 @@ func Chown(uid, gid int) HTTPOption { } func platformSpecificSource(id string) bool { - return strings.HasPrefix(id, "docker-image://") + return strings.HasPrefix(id, "docker-image://") || strings.HasPrefix(id, "oci-layout://") } func addCap(c *Constraints, id apicaps.CapID) { diff --git a/vendor/github.com/moby/buildkit/client/llb/sourcemap.go b/vendor/github.com/moby/buildkit/client/llb/sourcemap.go index 149355d92e..17cc1de6f5 100644 --- a/vendor/github.com/moby/buildkit/client/llb/sourcemap.go +++ b/vendor/github.com/moby/buildkit/client/llb/sourcemap.go @@ -61,7 +61,7 @@ func (smc *sourceMapCollector) Add(dgst digest.Digest, ls []*SourceLocation) { } smc.index[l.SourceMap] = idx } - smc.locations[dgst] = ls + smc.locations[dgst] = append(smc.locations[dgst], ls...) } func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt) (*pb.Source, error) { diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go index 28ea494bae..7d35f3be59 100644 --- a/vendor/github.com/moby/buildkit/client/llb/state.go +++ b/vendor/github.com/moby/buildkit/client/llb/state.go @@ -199,10 +199,10 @@ func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollect if opMeta != nil { def.Metadata[dgst] = mergeMetadata(def.Metadata[dgst], *opMeta) } + s.Add(dgst, sls) if _, ok := cache[dgst]; ok { return def, nil } - s.Add(dgst, sls) def.Def = append(def.Def, dt) cache[dgst] = struct{}{} return def, nil @@ -455,6 +455,7 @@ type ConstraintsOpt interface { HTTPOption ImageOption GitOption + OCILayoutOption } type constraintsOptFunc func(m *Constraints) @@ -471,6 +472,10 @@ func (fn constraintsOptFunc) SetLocalOption(li *LocalInfo) { li.applyConstraints(fn) } +func (fn constraintsOptFunc) SetOCILayoutOption(oi *OCILayoutInfo) { + oi.applyConstraints(fn) +} + func (fn constraintsOptFunc) SetHTTPOption(hi *HTTPInfo) { hi.applyConstraints(fn) } @@ -612,6 +617,7 @@ var ( LinuxArmel = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"}) LinuxArm64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm64"}) LinuxS390x = Platform(ocispecs.Platform{OS: "linux", Architecture: "s390x"}) + LinuxPpc64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64"}) LinuxPpc64le = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64le"}) Darwin = Platform(ocispecs.Platform{OS: "darwin", Architecture: "amd64"}) Windows = Platform(ocispecs.Platform{OS: "windows", Architecture: "amd64"}) @@ -619,9 +625,7 @@ var ( func Require(filters ...string) ConstraintsOpt { return constraintsOptFunc(func(c *Constraints) { - for _, f := range filters { - c.WorkerConstraints = append(c.WorkerConstraints, f) - } + c.WorkerConstraints = append(c.WorkerConstraints, filters...) 
}) } diff --git a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go index a9c100a95b..3731ff36bb 100644 --- a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go +++ b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go @@ -2,8 +2,9 @@ package ociindex import ( "encoding/json" - "io/ioutil" + "io" "os" + "path" "github.com/gofrs/flock" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" @@ -11,16 +12,132 @@ import ( ) const ( - // IndexJSONLockFileSuffix is the suffix of the lock file - IndexJSONLockFileSuffix = ".lock" + // indexFile is the name of the index file + indexFile = "index.json" + + // lockFileSuffix is the suffix of the lock file + lockFileSuffix = ".lock" ) -// PutDescToIndex puts desc to index with tag. -// Existing manifests with the same tag will be removed from the index. -func PutDescToIndex(index *ocispecs.Index, desc ocispecs.Descriptor, tag string) error { - if index == nil { - index = &ocispecs.Index{} +type StoreIndex struct { + indexPath string + lockPath string +} + +func NewStoreIndex(storePath string) StoreIndex { + indexPath := path.Join(storePath, indexFile) + return StoreIndex{ + indexPath: indexPath, + lockPath: indexPath + lockFileSuffix, } +} + +func (s StoreIndex) Read() (*ocispecs.Index, error) { + lock := flock.New(s.lockPath) + locked, err := lock.TryRLock() + if err != nil { + return nil, errors.Wrapf(err, "could not lock %s", s.lockPath) + } + if !locked { + return nil, errors.Errorf("could not lock %s", s.lockPath) + } + defer func() { + lock.Unlock() + os.RemoveAll(s.lockPath) + }() + + b, err := os.ReadFile(s.indexPath) + if err != nil { + return nil, errors.Wrapf(err, "could not read %s", s.indexPath) + } + var idx ocispecs.Index + if err := json.Unmarshal(b, &idx); err != nil { + return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b)) + } + return &idx, nil +} + +func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error { + lock := flock.New(s.lockPath) + locked, err := lock.TryLock() + if err != nil { + return errors.Wrapf(err, "could not lock %s", s.lockPath) + } + if !locked { + return errors.Errorf("could not lock %s", s.lockPath) + } + defer func() { + lock.Unlock() + os.RemoveAll(s.lockPath) + }() + + f, err := os.OpenFile(s.indexPath, os.O_RDWR|os.O_CREATE, 0644) + if err != nil { + return errors.Wrapf(err, "could not open %s", s.indexPath) + } + defer f.Close() + + var idx ocispecs.Index + b, err := io.ReadAll(f) + if err != nil { + return errors.Wrapf(err, "could not read %s", s.indexPath) + } + if len(b) > 0 { + if err := json.Unmarshal(b, &idx); err != nil { + return errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b)) + } + } + + if err = insertDesc(&idx, desc, tag); err != nil { + return err + } + + b, err = json.Marshal(idx) + if err != nil { + return err + } + if _, err = f.WriteAt(b, 0); err != nil { + return err + } + if err = f.Truncate(int64(len(b))); err != nil { + return err + } + return nil +} + +func (s StoreIndex) Get(tag string) (*ocispecs.Descriptor, error) { + idx, err := s.Read() + if err != nil { + return nil, err + } + + for _, m := range idx.Manifests { + if t, ok := m.Annotations[ocispecs.AnnotationRefName]; ok && t == tag { + return &m, nil + } + } + return nil, nil +} + +func (s StoreIndex) GetSingle() (*ocispecs.Descriptor, error) { + idx, err := s.Read() + if err != nil { + return nil, err + } + + if len(idx.Manifests) == 1 { + return &idx.Manifests[0], nil 
+ } + return nil, nil +} + +// insertDesc puts desc to index with tag. +// Existing manifests with the same tag will be removed from the index. +func insertDesc(index *ocispecs.Index, desc ocispecs.Descriptor, tag string) error { + if index == nil { + return nil + } + if index.SchemaVersion == 0 { index.SchemaVersion = 2 } @@ -41,73 +158,3 @@ func PutDescToIndex(index *ocispecs.Index, desc ocispecs.Descriptor, tag string) index.Manifests = append(index.Manifests, desc) return nil } - -func PutDescToIndexJSONFileLocked(indexJSONPath string, desc ocispecs.Descriptor, tag string) error { - lockPath := indexJSONPath + IndexJSONLockFileSuffix - lock := flock.New(lockPath) - locked, err := lock.TryLock() - if err != nil { - return errors.Wrapf(err, "could not lock %s", lockPath) - } - if !locked { - return errors.Errorf("could not lock %s", lockPath) - } - defer func() { - lock.Unlock() - os.RemoveAll(lockPath) - }() - f, err := os.OpenFile(indexJSONPath, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return errors.Wrapf(err, "could not open %s", indexJSONPath) - } - defer f.Close() - var idx ocispecs.Index - b, err := ioutil.ReadAll(f) - if err != nil { - return errors.Wrapf(err, "could not read %s", indexJSONPath) - } - if len(b) > 0 { - if err := json.Unmarshal(b, &idx); err != nil { - return errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b)) - } - } - if err = PutDescToIndex(&idx, desc, tag); err != nil { - return err - } - b, err = json.Marshal(idx) - if err != nil { - return err - } - if _, err = f.WriteAt(b, 0); err != nil { - return err - } - if err = f.Truncate(int64(len(b))); err != nil { - return err - } - return nil -} - -func ReadIndexJSONFileLocked(indexJSONPath string) (*ocispecs.Index, error) { - lockPath := indexJSONPath + IndexJSONLockFileSuffix - lock := flock.New(lockPath) - locked, err := lock.TryRLock() - if err != nil { - return nil, errors.Wrapf(err, "could not lock %s", lockPath) - } - if !locked { - return nil, errors.Errorf("could not lock %s", lockPath) - } - defer func() { - lock.Unlock() - os.RemoveAll(lockPath) - }() - b, err := ioutil.ReadFile(indexJSONPath) - if err != nil { - return nil, errors.Wrapf(err, "could not read %s", indexJSONPath) - } - var idx ocispecs.Index - if err := json.Unmarshal(b, &idx); err != nil { - return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b)) - } - return &idx, nil -} diff --git a/vendor/github.com/moby/buildkit/client/prune.go b/vendor/github.com/moby/buildkit/client/prune.go index ed4815cb5a..af84913855 100644 --- a/vendor/github.com/moby/buildkit/client/prune.go +++ b/vendor/github.com/moby/buildkit/client/prune.go @@ -23,7 +23,7 @@ func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOpti if info.All { req.All = true } - cl, err := c.controlClient().Prune(ctx, req) + cl, err := c.ControlClient().Prune(ctx, req) if err != nil { return errors.Wrap(err, "failed to call prune") } diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go index f14d9c410d..65183d61cd 100644 --- a/vendor/github.com/moby/buildkit/client/solve.go +++ b/vendor/github.com/moby/buildkit/client/solve.go @@ -2,6 +2,7 @@ package client import ( "context" + "encoding/base64" "encoding/json" "io" "os" @@ -14,16 +15,19 @@ import ( controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/ociindex" + "github.com/moby/buildkit/exporter/containerimage/exptypes" 
"github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" sessioncontent "github.com/moby/buildkit/session/content" "github.com/moby/buildkit/session/filesync" "github.com/moby/buildkit/session/grpchijack" "github.com/moby/buildkit/solver/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/entitlements" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" fstypes "github.com/tonistiigi/fsutil/types" "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" @@ -32,6 +36,7 @@ import ( type SolveOpt struct { Exports []ExportEntry LocalDirs map[string]string + OCIStores map[string]content.Store SharedKey string Frontend string FrontendAttrs map[string]string @@ -42,6 +47,9 @@ type SolveOpt struct { AllowedEntitlements []entitlements.Entitlement SharedSession *session.Session // TODO: refactor to better session syncing SessionPreInitialized bool // TODO: refactor to better session syncing + Internal bool + SourcePolicy *spb.Policy + Ref string } type ExportEntry struct { @@ -88,6 +96,9 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } ref := identity.NewID() + if opt.Ref != "" { + ref = opt.Ref + } eg, ctx := errgroup.WithContext(ctx) statusContext, cancelStatus := context.WithCancel(context.Background()) @@ -122,6 +133,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG ex = opt.Exports[0] } + storesToUpdate := []string{} + if !opt.SessionPreInitialized { if len(syncedDirs) > 0 { s.Allow(filesync.NewFSSyncProvider(syncedDirs)) @@ -131,50 +144,85 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG s.Allow(a) } + contentStores := map[string]content.Store{} + for key, store := range cacheOpt.contentStores { + contentStores[key] = store + } + for key, store := range opt.OCIStores { + key2 := "oci:" + key + if _, ok := contentStores[key2]; ok { + return nil, errors.Errorf("oci store key %q already exists", key) + } + contentStores[key2] = store + } + + var supportFile bool + var supportDir bool switch ex.Type { case ExporterLocal: - if ex.Output != nil { - return nil, errors.New("output file writer is not supported by local exporter") - } - if ex.OutputDir == "" { - return nil, errors.New("output directory is required for local exporter") - } - s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir)) - case ExporterOCI, ExporterDocker, ExporterTar: - if ex.OutputDir != "" { - return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type) - } + supportDir = true + case ExporterTar: + supportFile = true + case ExporterOCI, ExporterDocker: + supportDir = ex.OutputDir != "" + supportFile = ex.Output != nil + } + + if supportFile && supportDir { + return nil, errors.Errorf("both file and directory output is not support by %s exporter", ex.Type) + } + if !supportFile && ex.Output != nil { + return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type) + } + if !supportDir && ex.OutputDir != "" { + return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type) + } + + if supportFile { if ex.Output == nil { return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type) } s.Allow(filesync.NewFSSyncTarget(ex.Output)) - default: - if ex.Output != nil { - return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type) + } + if 
supportDir { + if ex.OutputDir == "" { + return nil, errors.Errorf("output directory is required for %s exporter", ex.Type) } - if ex.OutputDir != "" { - return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type) + switch ex.Type { + case ExporterOCI, ExporterDocker: + if err := os.MkdirAll(ex.OutputDir, 0755); err != nil { + return nil, err + } + cs, err := contentlocal.NewStore(ex.OutputDir) + if err != nil { + return nil, err + } + contentStores["export"] = cs + storesToUpdate = append(storesToUpdate, ex.OutputDir) + default: + s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir)) } } - if len(cacheOpt.contentStores) > 0 { - s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores)) + if len(contentStores) > 0 { + s.Allow(sessioncontent.NewAttachable(contentStores)) } eg.Go(func() error { sd := c.sessionDialer if sd == nil { - sd = grpchijack.Dialer(c.controlClient()) + sd = grpchijack.Dialer(c.ControlClient()) } return s.Run(statusContext, sd) }) } + frontendAttrs := map[string]string{} + for k, v := range opt.FrontendAttrs { + frontendAttrs[k] = v + } for k, v := range cacheOpt.frontendAttrs { - if opt.FrontendAttrs == nil { - opt.FrontendAttrs = map[string]string{} - } - opt.FrontendAttrs[k] = v + frontendAttrs[k] = v } solveCtx, cancelSolve := context.WithCancel(ctx) @@ -188,8 +236,10 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG <-time.After(3 * time.Second) cancelStatus() }() - bklog.G(ctx).Debugf("stopping session") - s.Close() + if !opt.SessionPreInitialized { + bklog.G(ctx).Debugf("stopping session") + s.Close() + } }() var pbd *pb.Definition if def != nil { @@ -205,17 +255,19 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG frontendInputs[key] = def.ToPB() } - resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{ + resp, err := c.ControlClient().Solve(ctx, &controlapi.SolveRequest{ Ref: ref, Definition: pbd, Exporter: ex.Type, ExporterAttrs: ex.Attrs, Session: s.ID(), Frontend: opt.Frontend, - FrontendAttrs: opt.FrontendAttrs, + FrontendAttrs: frontendAttrs, FrontendInputs: frontendInputs, Cache: cacheOpt.options, Entitlements: opt.AllowedEntitlements, + Internal: opt.Internal, + SourcePolicy: opt.SourcePolicy, }) if err != nil { return errors.Wrap(err, "failed to solve") @@ -228,7 +280,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG if runGateway != nil { eg.Go(func() error { - err := runGateway(ref, s, opt.FrontendAttrs) + err := runGateway(ref, s, frontendAttrs) if err == nil { return nil } @@ -249,7 +301,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } eg.Go(func() error { - stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{ + stream, err := c.ControlClient().Status(statusContext, &controlapi.StatusRequest{ Ref: ref, }) if err != nil { @@ -263,52 +315,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } return errors.Wrap(err, "failed to receive status") } - s := SolveStatus{} - for _, v := range resp.Vertexes { - s.Vertexes = append(s.Vertexes, &Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, - ProgressGroup: v.ProgressGroup, - }) - } - for _, v := range resp.Statuses { - s.Statuses = append(s.Statuses, &VertexStatus{ - ID: v.ID, - Vertex: v.Vertex, - Name: v.Name, - Total: v.Total, - Current: 
v.Current, - Timestamp: v.Timestamp, - Started: v.Started, - Completed: v.Completed, - }) - } - for _, v := range resp.Logs { - s.Logs = append(s.Logs, &VertexLog{ - Vertex: v.Vertex, - Stream: int(v.Stream), - Data: v.Msg, - Timestamp: v.Timestamp, - }) - } - for _, v := range resp.Warnings { - s.Warnings = append(s.Warnings, &VertexWarning{ - Vertex: v.Vertex, - Level: int(v.Level), - Short: v.Short, - Detail: v.Detail, - URL: v.Url, - SourceInfo: v.Info, - Range: v.Ranges, - }) - } if statusChan != nil { - statusChan <- &s + statusChan <- NewSolveStatus(resp) } } }) @@ -323,8 +331,29 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG if err = json.Unmarshal([]byte(manifestDescJSON), &manifestDesc); err != nil { return nil, err } - for indexJSONPath, tag := range cacheOpt.indicesToUpdate { - if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil { + for storePath, tag := range cacheOpt.storesToUpdate { + idx := ociindex.NewStoreIndex(storePath) + if err := idx.Put(tag, manifestDesc); err != nil { + return nil, err + } + } + } + if manifestDescDt := res.ExporterResponse[exptypes.ExporterImageDescriptorKey]; manifestDescDt != "" { + manifestDescDt, err := base64.StdEncoding.DecodeString(manifestDescDt) + if err != nil { + return nil, err + } + var manifestDesc ocispecs.Descriptor + if err = json.Unmarshal([]byte(manifestDescDt), &manifestDesc); err != nil { + return nil, err + } + for _, storePath := range storesToUpdate { + tag := "latest" + if t, ok := res.ExporterResponse["image.name"]; ok { + tag = t + } + idx := ociindex.NewStoreIndex(storePath) + if err := idx.Put(tag, manifestDesc); err != nil { return nil, err } } @@ -332,7 +361,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG return res, nil } -func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) { +func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesync.StaticDirSource, error) { for _, d := range localDirs { fi, err := os.Stat(d) if err != nil { @@ -342,16 +371,16 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file return nil, errors.Errorf("%s not a directory", d) } } - resetUIDAndGID := func(p string, st *fstypes.Stat) bool { + resetUIDAndGID := func(p string, st *fstypes.Stat) fsutil.MapResult { st.Uid = 0 st.Gid = 0 - return true + return fsutil.MapResultKeep } - dirs := make([]filesync.SyncedDir, 0, len(localDirs)) + dirs := make(filesync.StaticDirSource, len(localDirs)) if def == nil { for name, d := range localDirs { - dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) + dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID} } } else { for _, dt := range def.Def { @@ -366,7 +395,7 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file if !ok { return nil, errors.Errorf("local directory %s not enabled", name) } - dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) + dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID} } } } @@ -383,24 +412,20 @@ func defaultSessionName() string { } type cacheOptions struct { - options controlapi.CacheOptions - contentStores map[string]content.Store // key: ID of content store ("local:" + csDir) - indicesToUpdate map[string]string // key: index.JSON file name, value: tag - frontendAttrs map[string]string + options controlapi.CacheOptions + contentStores map[string]content.Store 
// key: ID of content store ("local:" + csDir) + storesToUpdate map[string]string // key: path to content store, value: tag + frontendAttrs map[string]string } func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cacheOptions, error) { var ( cacheExports []*controlapi.CacheOptionsEntry cacheImports []*controlapi.CacheOptionsEntry - // legacy API is used for registry caches, because the daemon might not support the new API - legacyExportRef string - legacyImportRefs []string ) contentStores := make(map[string]content.Store) - indicesToUpdate := make(map[string]string) // key: index.JSON file name, value: tag + storesToUpdate := make(map[string]string) frontendAttrs := make(map[string]string) - legacyExportAttrs := make(map[string]string) for _, ex := range opt.CacheExports { if ex.Type == "local" { csDir := ex.Attrs["dest"] @@ -415,26 +440,26 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach return nil, err } contentStores["local:"+csDir] = cs - // TODO(AkihiroSuda): support custom index JSON path and tag - indexJSONPath := filepath.Join(csDir, "index.json") - indicesToUpdate[indexJSONPath] = "latest" - } - if ex.Type == "registry" && legacyExportRef == "" { - legacyExportRef = ex.Attrs["ref"] - for k, v := range ex.Attrs { - if k != "ref" { - legacyExportAttrs[k] = v - } + + tag := "latest" + if t, ok := ex.Attrs["tag"]; ok { + tag = t } - } else { - cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{ - Type: ex.Type, - Attrs: ex.Attrs, - }) + // TODO(AkihiroSuda): support custom index JSON path and tag + storesToUpdate[csDir] = tag } + if ex.Type == "registry" { + regRef := ex.Attrs["ref"] + if regRef == "" { + return nil, errors.New("registry cache exporter requires ref") + } + } + cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{ + Type: ex.Type, + Attrs: ex.Attrs, + }) } for _, im := range opt.CacheImports { - attrs := im.Attrs if im.Type == "local" { csDir := im.Attrs["src"] if csDir == "" { @@ -445,41 +470,40 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) continue } - // if digest is not specified, load from "latest" tag - if attrs["digest"] == "" { - idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json")) + // if digest is not specified, attempt to load from tag + if im.Attrs["digest"] == "" { + tag := "latest" + if t, ok := im.Attrs["tag"]; ok { + tag = t + } + + idx := ociindex.NewStoreIndex(csDir) + desc, err := idx.Get(tag) if err != nil { bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) continue } - for _, m := range idx.Manifests { - if (m.Annotations[ocispecs.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispecs.AnnotationRefName] == attrs["tag"]) { - attrs["digest"] = string(m.Digest) - break - } - } - if attrs["digest"] == "" { - return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json") + if desc != nil { + im.Attrs["digest"] = desc.Digest.String() } } + if im.Attrs["digest"] == "" { + return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json") + } contentStores["local:"+csDir] = cs } if im.Type == "registry" { - legacyImportRef := attrs["ref"] - legacyImportRefs = append(legacyImportRefs, legacyImportRef) - } 
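The rewritten parseCacheOptions honors a "tag" attribute on local cache exports and imports instead of hardcoding "latest". A client-side sketch, with placeholder path and tag values:

opt := client.SolveOpt{
	CacheExports: []client.CacheOptionsEntry{{
		Type:  "local",
		Attrs: map[string]string{"dest": "/tmp/buildcache", "tag": "feature-x"}, // placeholders
	}},
	CacheImports: []client.CacheOptionsEntry{{
		Type:  "local",
		Attrs: map[string]string{"src": "/tmp/buildcache", "tag": "feature-x"},
	}},
}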
else { - cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{ - Type: im.Type, - Attrs: attrs, - }) + regRef := im.Attrs["ref"] + if regRef == "" { + return nil, errors.New("registry cache importer requires ref") + } } + cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{ + Type: im.Type, + Attrs: im.Attrs, + }) } if opt.Frontend != "" || isGateway { - // use legacy API for registry importers, because the frontend might not support the new API - if len(legacyImportRefs) > 0 { - frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",") - } - // use new API for other importers if len(cacheImports) > 0 { s, err := json.Marshal(cacheImports) if err != nil { @@ -490,17 +514,12 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach } res := cacheOptions{ options: controlapi.CacheOptions{ - // old API (for registry caches, planned to be removed in early 2019) - ExportRefDeprecated: legacyExportRef, - ExportAttrsDeprecated: legacyExportAttrs, - ImportRefsDeprecated: legacyImportRefs, - // new API Exports: cacheExports, Imports: cacheImports, }, - contentStores: contentStores, - indicesToUpdate: indicesToUpdate, - frontendAttrs: frontendAttrs, + contentStores: contentStores, + storesToUpdate: storesToUpdate, + frontendAttrs: frontendAttrs, } return &res, nil } diff --git a/vendor/github.com/moby/buildkit/client/status.go b/vendor/github.com/moby/buildkit/client/status.go new file mode 100644 index 0000000000..d692094af3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/status.go @@ -0,0 +1,125 @@ +package client + +import ( + controlapi "github.com/moby/buildkit/api/services/control" +) + +var emptyLogVertexSize int + +func init() { + emptyLogVertex := controlapi.VertexLog{} + emptyLogVertexSize = emptyLogVertex.Size() +} + +func NewSolveStatus(resp *controlapi.StatusResponse) *SolveStatus { + s := &SolveStatus{} + for _, v := range resp.Vertexes { + s.Vertexes = append(s.Vertexes, &Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + ProgressGroup: v.ProgressGroup, + }) + } + for _, v := range resp.Statuses { + s.Statuses = append(s.Statuses, &VertexStatus{ + ID: v.ID, + Vertex: v.Vertex, + Name: v.Name, + Total: v.Total, + Current: v.Current, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for _, v := range resp.Logs { + s.Logs = append(s.Logs, &VertexLog{ + Vertex: v.Vertex, + Stream: int(v.Stream), + Data: v.Msg, + Timestamp: v.Timestamp, + }) + } + for _, v := range resp.Warnings { + s.Warnings = append(s.Warnings, &VertexWarning{ + Vertex: v.Vertex, + Level: int(v.Level), + Short: v.Short, + Detail: v.Detail, + URL: v.Url, + SourceInfo: v.Info, + Range: v.Ranges, + }) + } + return s +} + +func (ss *SolveStatus) Marshal() (out []*controlapi.StatusResponse) { + logSize := 0 + for { + retry := false + sr := controlapi.StatusResponse{} + for _, v := range ss.Vertexes { + sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + ProgressGroup: v.ProgressGroup, + }) + } + for _, v := range ss.Statuses { + sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{ + ID: v.ID, + Vertex: v.Vertex, + Name: v.Name, + Current: v.Current, + Total: v.Total, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for i, v := range 
ss.Logs { + sr.Logs = append(sr.Logs, &controlapi.VertexLog{ + Vertex: v.Vertex, + Stream: int64(v.Stream), + Msg: v.Data, + Timestamp: v.Timestamp, + }) + logSize += len(v.Data) + emptyLogVertexSize + // avoid logs growing big and split apart if they do + if logSize > 1024*1024 { + ss.Vertexes = nil + ss.Statuses = nil + ss.Logs = ss.Logs[i+1:] + retry = true + break + } + } + for _, v := range ss.Warnings { + sr.Warnings = append(sr.Warnings, &controlapi.VertexWarning{ + Vertex: v.Vertex, + Level: int64(v.Level), + Short: v.Short, + Detail: v.Detail, + Info: v.SourceInfo, + Ranges: v.Range, + Url: v.URL, + }) + } + out = append(out, &sr) + if !retry { + break + } + } + return +} diff --git a/vendor/github.com/moby/buildkit/client/workers.go b/vendor/github.com/moby/buildkit/client/workers.go index e5331cd608..b7f6f6725d 100644 --- a/vendor/github.com/moby/buildkit/client/workers.go +++ b/vendor/github.com/moby/buildkit/client/workers.go @@ -13,10 +13,11 @@ import ( // WorkerInfo contains information about a worker type WorkerInfo struct { - ID string `json:"id"` - Labels map[string]string `json:"labels"` - Platforms []ocispecs.Platform `json:"platforms"` - GCPolicy []PruneInfo `json:"gcPolicy"` + ID string `json:"id"` + Labels map[string]string `json:"labels"` + Platforms []ocispecs.Platform `json:"platforms"` + GCPolicy []PruneInfo `json:"gcPolicy"` + BuildkitVersion BuildkitVersion `json:"buildkitVersion"` } // ListWorkers lists all active workers @@ -27,7 +28,7 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([] } req := &controlapi.ListWorkersRequest{Filter: info.Filter} - resp, err := c.controlClient().ListWorkers(ctx, req) + resp, err := c.ControlClient().ListWorkers(ctx, req) if err != nil { return nil, errors.Wrap(err, "failed to list workers") } @@ -36,10 +37,11 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([] for _, w := range resp.Record { wi = append(wi, &WorkerInfo{ - ID: w.ID, - Labels: w.Labels, - Platforms: pb.ToSpecPlatforms(w.Platforms), - GCPolicy: fromAPIGCPolicy(w.GCPolicy), + ID: w.ID, + Labels: w.Labels, + Platforms: pb.ToSpecPlatforms(w.Platforms), + GCPolicy: fromAPIGCPolicy(w.GCPolicy), + BuildkitVersion: fromAPIBuildkitVersion(w.BuildkitVersion), }) } diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go new file mode 100644 index 0000000000..1734d5e156 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go @@ -0,0 +1,132 @@ +package config + +import ( + resolverconfig "github.com/moby/buildkit/util/resolver/config" +) + +// Config provides containerd configuration data for the server +type Config struct { + Debug bool `toml:"debug"` + + // Root is the path to a directory where buildkit will store persistent data + Root string `toml:"root"` + + // Entitlements e.g. 
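NewSolveStatus and SolveStatus.Marshal above take over the conversion work the daemon previously did inline in Status: once roughly 1MiB of log data has accumulated, Marshal defers the remaining log entries to a follow-up StatusResponse, mutating the receiver's slices as it splits. A small demonstration, assuming only that VertexLog.Data is the payload being counted:

import (
	"bytes"
	"fmt"

	"github.com/moby/buildkit/client"
)

func demoSplit() {
	ss := &client.SolveStatus{}
	for i := 0; i < 3; i++ {
		ss.Logs = append(ss.Logs, &client.VertexLog{
			Data: bytes.Repeat([]byte("x"), 600*1024), // ~600KiB per entry
		})
	}
	resps := ss.Marshal()           // consumes ss.Logs while splitting
	fmt.Println(len(resps) > 1)     // true: the ~1MiB budget forces a split
}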
security.insecure, network.host + Entitlements []string `toml:"insecure-entitlements"` + // GRPC configuration settings + GRPC GRPCConfig `toml:"grpc"` + + Workers struct { + OCI OCIConfig `toml:"oci"` + Containerd ContainerdConfig `toml:"containerd"` + } `toml:"worker"` + + Registries map[string]resolverconfig.RegistryConfig `toml:"registry"` + + DNS *DNSConfig `toml:"dns"` + + History *HistoryConfig `toml:"history"` +} + +type GRPCConfig struct { + Address []string `toml:"address"` + DebugAddress string `toml:"debugAddress"` + UID *int `toml:"uid"` + GID *int `toml:"gid"` + + TLS TLSConfig `toml:"tls"` + // MaxRecvMsgSize int `toml:"max_recv_message_size"` + // MaxSendMsgSize int `toml:"max_send_message_size"` +} + +type TLSConfig struct { + Cert string `toml:"cert"` + Key string `toml:"key"` + CA string `toml:"ca"` +} + +type GCConfig struct { + GC *bool `toml:"gc"` + GCKeepStorage int64 `toml:"gckeepstorage"` + GCPolicy []GCPolicy `toml:"gcpolicy"` +} + +type NetworkConfig struct { + Mode string `toml:"networkMode"` + CNIConfigPath string `toml:"cniConfigPath"` + CNIBinaryPath string `toml:"cniBinaryPath"` + CNIPoolSize int `toml:"cniPoolSize"` +} + +type OCIConfig struct { + Enabled *bool `toml:"enabled"` + Labels map[string]string `toml:"labels"` + Platforms []string `toml:"platforms"` + Snapshotter string `toml:"snapshotter"` + Rootless bool `toml:"rootless"` + NoProcessSandbox bool `toml:"noProcessSandbox"` + GCConfig + NetworkConfig + // UserRemapUnsupported is unsupported key for testing. The feature is + // incomplete and the intention is to make it default without config. + UserRemapUnsupported string `toml:"userRemapUnsupported"` + // For use in storing the OCI worker binary name that will replace buildkit-runc + Binary string `toml:"binary"` + ProxySnapshotterPath string `toml:"proxySnapshotterPath"` + DefaultCgroupParent string `toml:"defaultCgroupParent"` + + // StargzSnapshotterConfig is configuration for stargz snapshotter. + // We use a generic map[string]interface{} in order to remove the dependency + // on stargz snapshotter's config pkg from our config. + StargzSnapshotterConfig map[string]interface{} `toml:"stargzSnapshotter"` + + // ApparmorProfile is the name of the apparmor profile that should be used to constrain build containers. + // The profile should already be loaded (by a higher level system) before creating a worker. + ApparmorProfile string `toml:"apparmor-profile"` + + // SELinux enables applying SELinux labels. + SELinux bool `toml:"selinux"` + + // MaxParallelism is the maximum number of parallel build steps that can be run at the same time. + MaxParallelism int `toml:"max-parallelism"` +} + +type ContainerdConfig struct { + Address string `toml:"address"` + Enabled *bool `toml:"enabled"` + Labels map[string]string `toml:"labels"` + Platforms []string `toml:"platforms"` + Namespace string `toml:"namespace"` + GCConfig + NetworkConfig + Snapshotter string `toml:"snapshotter"` + + // ApparmorProfile is the name of the apparmor profile that should be used to constrain build containers. + // The profile should already be loaded (by a higher level system) before creating a worker. + ApparmorProfile string `toml:"apparmor-profile"` + + // SELinux enables applying SELinux labels. 
+ SELinux bool `toml:"selinux"` + + MaxParallelism int `toml:"max-parallelism"` + + Rootless bool `toml:"rootless"` +} + +type GCPolicy struct { + All bool `toml:"all"` + KeepBytes int64 `toml:"keepBytes"` + KeepDuration int64 `toml:"keepDuration"` + Filters []string `toml:"filters"` +} + +type DNSConfig struct { + Nameservers []string `toml:"nameservers"` + Options []string `toml:"options"` + SearchDomains []string `toml:"searchDomains"` +} + +type HistoryConfig struct { + MaxAge int64 `toml:"maxAge"` + MaxEntries int64 `toml:"maxEntries"` +} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go new file mode 100644 index 0000000000..6f3f197893 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go @@ -0,0 +1,31 @@ +package config + +const defaultCap int64 = 2e9 // 2GB + +func DefaultGCPolicy(p string, keep int64) []GCPolicy { + if keep == 0 { + keep = DetectDefaultGCCap(p) + } + return []GCPolicy{ + // if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days + { + Filters: []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"}, + KeepDuration: 48 * 3600, // 48h + KeepBytes: 512 * 1e6, // 512MB + }, + // remove any data not used for 60 days + { + KeepDuration: 60 * 24 * 3600, // 60d + KeepBytes: keep, + }, + // keep the unshared build cache under cap + { + KeepBytes: keep, + }, + // if previous policies were insufficient start deleting internal data to keep build cache under cap + { + All: true, + KeepBytes: keep, + }, + } +} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go new file mode 100644 index 0000000000..a2efe6f568 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go @@ -0,0 +1,18 @@ +//go:build !windows +// +build !windows + +package config + +import ( + "syscall" +) + +func DetectDefaultGCCap(root string) int64 { + var st syscall.Statfs_t + if err := syscall.Statfs(root, &st); err != nil { + return defaultCap + } + diskSize := int64(st.Bsize) * int64(st.Blocks) + avail := diskSize / 10 + return (avail/(1<<30) + 1) * 1e9 // round up +} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go new file mode 100644 index 0000000000..349fddbd51 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package config + +func DetectDefaultGCCap(root string) int64 { + return defaultCap +} diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/load.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/load.go new file mode 100644 index 0000000000..46e3dafb24 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/load.go @@ -0,0 +1,36 @@ +package config + +import ( + "io" + "os" + + "github.com/pelletier/go-toml" + "github.com/pkg/errors" +) + +// Load loads buildkitd config +func Load(r io.Reader) (Config, error) { + var c Config + t, err := toml.LoadReader(r) + if err != nil { + return c, errors.Wrap(err, "failed to parse config") + } + err = t.Unmarshal(&c) + if err != nil { + return c, errors.Wrap(err, "failed to parse config") + } + return c, nil +} + +// LoadFile loads buildkitd config file +func LoadFile(fp string) 
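Every tier of DefaultGCPolicy falls back to DetectDefaultGCCap when no keep-bytes value is configured; on Unix that is a tenth of the filesystem size, rounded up to a whole SI gigabyte. A standalone restatement of the rounding:

// e.g. a 100GiB filesystem: (100<<30)/10 = 10GiB avail → 10+1 → 11e9 bytes (~11GB cap)
func gcCap(diskSizeBytes int64) int64 {
	avail := diskSizeBytes / 10
	return (avail/(1<<30) + 1) * 1e9
}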
(Config, error) { + f, err := os.Open(fp) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return Config{}, nil + } + return Config{}, errors.Wrapf(err, "failed to load config from %s", fp) + } + defer f.Close() + return Load(f) +} diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go index 0d3e7976e5..8446f4bedc 100644 --- a/vendor/github.com/moby/buildkit/control/control.go +++ b/vendor/github.com/moby/buildkit/control/control.go @@ -2,34 +2,49 @@ package control import ( "context" + "fmt" + "strconv" "sync" "sync/atomic" "time" - "github.com/moby/buildkit/util/bklog" - + contentapi "github.com/containerd/containerd/api/services/content/v1" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/services/content/contentserver" + "github.com/docker/distribution/reference" + "github.com/mitchellh/hashstructure/v2" controlapi "github.com/moby/buildkit/api/services/control" apitypes "github.com/moby/buildkit/api/types" "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client" + "github.com/moby/buildkit/cmd/buildkitd/config" controlgateway "github.com/moby/buildkit/control/gateway" "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/attestations" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/grpchijack" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/proc" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/util/throttle" "github.com/moby/buildkit/util/tracing/transform" + "github.com/moby/buildkit/version" "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "go.etcd.io/bbolt" sdktrace "go.opentelemetry.io/otel/sdk/trace" tracev1 "go.opentelemetry.io/proto/otlp/collector/trace/v1" "golang.org/x/sync/errgroup" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -42,6 +57,10 @@ type Opt struct { ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc Entitlements []string TraceCollector sdktrace.SpanExporter + HistoryDB *bbolt.DB + LeaseManager leases.Manager + ContentStore content.Store + HistoryConfig *config.HistoryConfig } type Controller struct { // TODO: ControlService @@ -49,6 +68,7 @@ type Controller struct { // TODO: ControlService buildCount int64 opt Opt solver *llbsolver.Solver + history *llbsolver.HistoryQueue cache solver.CacheManager gatewayForwarder *controlgateway.GatewayForwarder throttledGC func() @@ -61,14 +81,31 @@ func NewController(opt Opt) (*Controller, error) { gatewayForwarder := controlgateway.NewGatewayForwarder() - solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFuncs, gatewayForwarder, opt.SessionManager, opt.Entitlements) + hq := llbsolver.NewHistoryQueue(llbsolver.HistoryQueueOpt{ + DB: opt.HistoryDB, + LeaseManager: opt.LeaseManager, + ContentStore: opt.ContentStore, + CleanConfig: opt.HistoryConfig, + }) + + s, err := llbsolver.New(llbsolver.Opt{ + WorkerController: opt.WorkerController, + Frontends: opt.Frontends, + CacheManager: cache, + CacheResolvers: opt.ResolveCacheImporterFuncs, + GatewayForwarder: 
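LoadFile deliberately returns a zero Config rather than an error when the file is absent, so callers can treat a missing config as defaults. A usage sketch, assuming the conventional /etc/buildkit/buildkitd.toml path:

cfg, err := bkconfig.LoadFile("/etc/buildkit/buildkitd.toml") // bkconfig = the config package above
if err != nil {
	return err
}
if cfg.History != nil {
	fmt.Println("history limits:", cfg.History.MaxAge, cfg.History.MaxEntries)
}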
gatewayForwarder, + SessionManager: opt.SessionManager, + Entitlements: opt.Entitlements, + HistoryQueue: hq, + }) if err != nil { return nil, errors.Wrap(err, "failed to create solver") } c := &Controller{ opt: opt, - solver: solver, + solver: s, + history: hq, cache: cache, gatewayForwarder: gatewayForwarder, } @@ -81,11 +118,17 @@ func NewController(opt Opt) (*Controller, error) { return c, nil } -func (c *Controller) Register(server *grpc.Server) error { +func (c *Controller) Close() error { + return c.opt.WorkerController.Close() +} + +func (c *Controller) Register(server *grpc.Server) { controlapi.RegisterControlServer(server, c) c.gatewayForwarder.Register(server) tracev1.RegisterTraceServiceServer(server, c) - return nil + + store := &roContentStore{c.opt.ContentStore} + contentapi.RegisterContentServer(server, contentserver.New(store)) } func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) { @@ -205,6 +248,34 @@ func (c *Controller) Export(ctx context.Context, req *tracev1.ExportTraceService return &tracev1.ExportTraceServiceResponse{}, nil } +func (c *Controller) ListenBuildHistory(req *controlapi.BuildHistoryRequest, srv controlapi.Control_ListenBuildHistoryServer) error { + if err := sendTimestampHeader(srv); err != nil { + return err + } + return c.history.Listen(srv.Context(), req, func(h *controlapi.BuildHistoryEvent) error { + if err := srv.Send(h); err != nil { + return err + } + return nil + }) +} + +func (c *Controller) UpdateBuildHistory(ctx context.Context, req *controlapi.UpdateBuildHistoryRequest) (*controlapi.UpdateBuildHistoryResponse, error) { + if !req.Delete { + err := c.history.UpdateRef(ctx, req.Ref, func(r *controlapi.BuildHistoryRecord) error { + if req.Pinned == r.Pinned { + return nil + } + r.Pinned = req.Pinned + return nil + }) + return &controlapi.UpdateBuildHistoryResponse{}, err + } + + err := c.history.Delete(ctx, req.Ref) + return &controlapi.UpdateBuildHistoryResponse{}, err +} + func translateLegacySolveRequest(req *controlapi.SolveRequest) error { // translates ExportRef and ExportAttrs to new Exports (v0.4.0) if legacyExportRef := req.Cache.ExportRefDeprecated; legacyExportRef != "" { @@ -255,6 +326,17 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* if err != nil { return nil, err } + + // if SOURCE_DATE_EPOCH is set, enable it for the exporter + if v, ok := epoch.ParseBuildArgs(req.FrontendAttrs); ok { + if _, ok := req.ExporterAttrs[epoch.KeySourceDateEpoch]; !ok { + if req.ExporterAttrs == nil { + req.ExporterAttrs = make(map[string]string) + } + req.ExporterAttrs[epoch.KeySourceDateEpoch] = v + } + } + if req.Exporter != "" { exp, err := w.Exporter(req.Exporter, c.opt.SessionManager) if err != nil { @@ -266,32 +348,42 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* } } - var ( - cacheExporter remotecache.Exporter - cacheExportMode solver.CacheExportMode - cacheImports []frontend.CacheOptionsEntry - ) - if len(req.Cache.Exports) > 1 { - // TODO(AkihiroSuda): this should be fairly easy - return nil, errors.New("specifying multiple cache exports is not supported currently") + if c, err := findDuplicateCacheOptions(req.Cache.Exports); err != nil { + return nil, err + } else if c != nil { + types := []string{} + for _, c := range c { + types = append(types, c.Type) + } + return nil, errors.Errorf("duplicate cache exports %s", types) } - - if len(req.Cache.Exports) == 1 { - e := req.Cache.Exports[0] + var 
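ListenBuildHistory and UpdateBuildHistory expose the new history.db-backed record store to clients. A hedged sketch against the generated controlapi client (field names per buildkit's control.proto are an assumption beyond what this diff shows; ref is a record reference taken from an event):

cc := c.ControlClient() // c is a *client.Client
stream, err := cc.ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{})
if err != nil {
	return err
}
for {
	ev, err := stream.Recv()
	if err != nil {
		break // io.EOF once the server is done
	}
	if ev.Record != nil {
		fmt.Println(ev.Type, ev.Record.Ref)
	}
}
// Pin a record so history pruning keeps it, or drop it outright:
_, _ = cc.UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{Ref: ref, Pinned: true})
_, _ = cc.UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{Ref: ref, Delete: true})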
cacheExporters []llbsolver.RemoteCacheExporter + for _, e := range req.Cache.Exports { cacheExporterFunc, ok := c.opt.ResolveCacheExporterFuncs[e.Type] if !ok { return nil, errors.Errorf("unknown cache exporter: %q", e.Type) } - cacheExporter, err = cacheExporterFunc(ctx, session.NewGroup(req.Session), e.Attrs) + var exp llbsolver.RemoteCacheExporter + exp.Exporter, err = cacheExporterFunc(ctx, session.NewGroup(req.Session), e.Attrs) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "failed to configure %v cache exporter", e.Type) } if exportMode, supported := parseCacheExportMode(e.Attrs["mode"]); !supported { bklog.G(ctx).Debugf("skipping invalid cache export mode: %s", e.Attrs["mode"]) } else { - cacheExportMode = exportMode + exp.CacheExportMode = exportMode } + if ignoreErrorStr, ok := e.Attrs["ignore-error"]; ok { + if ignoreError, supported := parseCacheExportIgnoreError(ignoreErrorStr); !supported { + bklog.G(ctx).Debugf("skipping invalid cache export ignore-error: %s", e.Attrs["ignore-error"]) + } else { + exp.IgnoreError = ignoreError + } + } + cacheExporters = append(cacheExporters, exp) } + + var cacheImports []frontend.CacheOptionsEntry for _, im := range req.Cache.Imports { cacheImports = append(cacheImports, frontend.CacheOptionsEntry{ Type: im.Type, @@ -299,6 +391,36 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* }) } + attests, err := attestations.Parse(req.FrontendAttrs) + if err != nil { + return nil, err + } + + var procs []llbsolver.Processor + + if attrs, ok := attests["sbom"]; ok { + src := attrs["generator"] + if src == "" { + return nil, errors.Errorf("sbom generator cannot be empty") + } + ref, err := reference.ParseNormalizedNamed(src) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse sbom generator %s", src) + } + + useCache := true + if v, ok := req.FrontendAttrs["no-cache"]; ok && v == "" { + // disable cache if cache is disabled for all stages + useCache = false + } + ref = reference.TagNameOnly(ref) + procs = append(procs, proc.SBOMProcessor(ref.String(), useCache)) + } + + if attrs, ok := attests["provenance"]; ok { + procs = append(procs, proc.ProvenanceProcessor(attrs)) + } + resp, err := c.solver.Solve(ctx, req.Ref, req.Session, frontend.SolveRequest{ Frontend: req.Frontend, Definition: req.Definition, @@ -306,10 +428,11 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* FrontendInputs: req.FrontendInputs, CacheImports: cacheImports, }, llbsolver.ExporterRequest{ - Exporter: expi, - CacheExporter: cacheExporter, - CacheExportMode: cacheExportMode, - }, req.Entitlements) + Exporter: expi, + CacheExporters: cacheExporters, + Type: req.Exporter, + Attrs: req.ExporterAttrs, + }, req.Entitlements, procs, req.Internal, req.SourcePolicy) if err != nil { return nil, err } @@ -319,6 +442,9 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* } func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Control_StatusServer) error { + if err := sendTimestampHeader(stream); err != nil { + return err + } ch := make(chan *client.SolveStatus, 8) eg, ctx := errgroup.WithContext(stream.Context()) @@ -332,68 +458,10 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con if !ok { return nil } - logSize := 0 - for { - retry := false - sr := controlapi.StatusResponse{} - for _, v := range ss.Vertexes { - sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, 
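The SBOM and provenance processors are switched on by frontend attributes that attestations.Parse extracts. Assuming the attest:<type> key convention with comma-separated key=value payloads, a client would request both roughly like this (the generator image is a placeholder; the controller normalizes it with reference.TagNameOnly before use):

frontendAttrs := map[string]string{
	"attest:sbom":       "generator=docker/buildkit-syft-scanner:stable-1", // placeholder scanner image
	"attest:provenance": "mode=max",
}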
- Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, - ProgressGroup: v.ProgressGroup, - }) - } - for _, v := range ss.Statuses { - sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{ - ID: v.ID, - Vertex: v.Vertex, - Name: v.Name, - Current: v.Current, - Total: v.Total, - Timestamp: v.Timestamp, - Started: v.Started, - Completed: v.Completed, - }) - } - for i, v := range ss.Logs { - sr.Logs = append(sr.Logs, &controlapi.VertexLog{ - Vertex: v.Vertex, - Stream: int64(v.Stream), - Msg: v.Data, - Timestamp: v.Timestamp, - }) - logSize += len(v.Data) + emptyLogVertexSize - // avoid logs growing big and split apart if they do - if logSize > 1024*1024 { - ss.Vertexes = nil - ss.Statuses = nil - ss.Logs = ss.Logs[i+1:] - retry = true - break - } - } - for _, v := range ss.Warnings { - sr.Warnings = append(sr.Warnings, &controlapi.VertexWarning{ - Vertex: v.Vertex, - Level: int64(v.Level), - Short: v.Short, - Detail: v.Detail, - Info: v.SourceInfo, - Ranges: v.Range, - Url: v.URL, - }) - } - if err := stream.SendMsg(&sr); err != nil { + for _, sr := range ss.Marshal() { + if err := stream.SendMsg(sr); err != nil { return err } - if !retry { - break - } } } }) @@ -426,15 +494,26 @@ func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersR } for _, w := range workers { resp.Record = append(resp.Record, &apitypes.WorkerRecord{ - ID: w.ID(), - Labels: w.Labels(), - Platforms: pb.PlatformsFromSpec(w.Platforms(true)), - GCPolicy: toPBGCPolicy(w.GCPolicy()), + ID: w.ID(), + Labels: w.Labels(), + Platforms: pb.PlatformsFromSpec(w.Platforms(true)), + GCPolicy: toPBGCPolicy(w.GCPolicy()), + BuildkitVersion: toPBBuildkitVersion(w.BuildkitVersion()), }) } return resp, nil } +func (c *Controller) Info(ctx context.Context, r *controlapi.InfoRequest) (*controlapi.InfoResponse, error) { + return &controlapi.InfoResponse{ + BuildkitVersion: &apitypes.BuildkitVersion{ + Package: version.Package, + Version: version.Version, + Revision: version.Revision, + }, + }, nil +} + func (c *Controller) gc() { c.gcmu.Lock() defer c.gcmu.Unlock() @@ -488,6 +567,14 @@ func parseCacheExportMode(mode string) (solver.CacheExportMode, bool) { return solver.CacheExportModeMin, false } +func parseCacheExportIgnoreError(ignoreErrorStr string) (bool, bool) { + ignoreError, err := strconv.ParseBool(ignoreErrorStr) + if err != nil { + return false, false + } + return ignoreError, true +} + func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy { policy := make([]*apitypes.GCPolicy, 0, len(in)) for _, p := range in { @@ -500,3 +587,76 @@ func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy { } return policy } + +func toPBBuildkitVersion(in client.BuildkitVersion) *apitypes.BuildkitVersion { + return &apitypes.BuildkitVersion{ + Package: in.Package, + Version: in.Version, + Revision: in.Revision, + } +} + +func findDuplicateCacheOptions(cacheOpts []*controlapi.CacheOptionsEntry) ([]*controlapi.CacheOptionsEntry, error) { + seen := map[string]*controlapi.CacheOptionsEntry{} + duplicate := map[string]struct{}{} + for _, opt := range cacheOpts { + k, err := cacheOptKey(*opt) + if err != nil { + return nil, err + } + if _, ok := seen[k]; ok { + duplicate[k] = struct{}{} + } + seen[k] = opt + } + + var duplicates []*controlapi.CacheOptionsEntry + for k := range duplicate { + duplicates = append(duplicates, seen[k]) + } + return duplicates, nil +} + +func cacheOptKey(opt controlapi.CacheOptionsEntry) (string, error) { + if opt.Type == "registry" && 
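findDuplicateCacheOptions keys registry entries by ref alone, so two registry exports pointing at the same ref collide even when their other attributes differ; all other entries are keyed by a hashstructure digest of {Type, Attrs}. For example, with hypothetical refs:

exports := []*controlapi.CacheOptionsEntry{
	{Type: "registry", Attrs: map[string]string{"ref": "example.com/cache:ci", "mode": "max"}},
	{Type: "registry", Attrs: map[string]string{"ref": "example.com/cache:ci"}},
}
// Solve rejects this request with: duplicate cache exports [registry]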
opt.Attrs["ref"] != "" { + return opt.Attrs["ref"], nil + } + var rawOpt = struct { + Type string + Attrs map[string]string + }{ + Type: opt.Type, + Attrs: opt.Attrs, + } + hash, err := hashstructure.Hash(rawOpt, hashstructure.FormatV2, nil) + if err != nil { + return "", err + } + return fmt.Sprint(opt.Type, ":", hash), nil +} + +type roContentStore struct { + content.Store +} + +func (cs *roContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + return nil, errors.Errorf("read-only content store") +} + +func (cs *roContentStore) Delete(ctx context.Context, dgst digest.Digest) error { + return errors.Errorf("read-only content store") +} + +func (cs *roContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + return content.Info{}, errors.Errorf("read-only content store") +} + +func (cs *roContentStore) Abort(ctx context.Context, ref string) error { + return errors.Errorf("read-only content store") +} + +const timestampKey = "buildkit-current-timestamp" + +func sendTimestampHeader(srv grpc.ServerStream) error { + return srv.SendHeader(metadata.Pairs(timestampKey, time.Now().Format(time.RFC3339Nano))) +} diff --git a/vendor/github.com/moby/buildkit/control/gateway/gateway.go b/vendor/github.com/moby/buildkit/control/gateway/gateway.go index 62c696d6c4..4451e022d3 100644 --- a/vendor/github.com/moby/buildkit/control/gateway/gateway.go +++ b/vendor/github.com/moby/buildkit/control/gateway/gateway.go @@ -111,6 +111,14 @@ func (gwf *GatewayForwarder) ReadFile(ctx context.Context, req *gwapi.ReadFileRe return fwd.ReadFile(ctx, req) } +func (gwf *GatewayForwarder) Evaluate(ctx context.Context, req *gwapi.EvaluateRequest) (*gwapi.EvaluateResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding Evaluate") + } + return fwd.Evaluate(ctx, req) +} + func (gwf *GatewayForwarder) Ping(ctx context.Context, req *gwapi.PingRequest) (*gwapi.PongResponse, error) { fwd, err := gwf.lookupForwarder(ctx) if err != nil { diff --git a/vendor/github.com/moby/buildkit/control/init.go b/vendor/github.com/moby/buildkit/control/init.go deleted file mode 100644 index 2e86133e41..0000000000 --- a/vendor/github.com/moby/buildkit/control/init.go +++ /dev/null @@ -1,10 +0,0 @@ -package control - -import controlapi "github.com/moby/buildkit/api/services/control" - -var emptyLogVertexSize int - -func init() { - emptyLogVertex := controlapi.VertexLog{} - emptyLogVertexSize = emptyLogVertex.Size() -} diff --git a/vendor/github.com/moby/buildkit/executor/executor.go b/vendor/github.com/moby/buildkit/executor/executor.go index 4727af4b03..a323bcc9cc 100644 --- a/vendor/github.com/moby/buildkit/executor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/executor.go @@ -23,6 +23,8 @@ type Meta struct { CgroupParent string NetMode pb.NetMode SecurityMode pb.SecurityMode + + RemoveMountStubsRecursive bool } type Mountable interface { diff --git a/vendor/github.com/moby/buildkit/executor/oci/hosts.go b/vendor/github.com/moby/buildkit/executor/oci/hosts.go index d0505c28cc..0d193555c9 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/hosts.go +++ b/vendor/github.com/moby/buildkit/executor/oci/hosts.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "io/ioutil" "os" "path/filepath" @@ -56,7 +55,7 @@ func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools } tmpPath := p + ".tmp" - if err := ioutil.WriteFile(tmpPath, b.Bytes(), 0644); err != nil 
{ + if err := os.WriteFile(tmpPath, b.Bytes(), 0644); err != nil { return "", nil, err } diff --git a/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go index da77456976..3ac0feda7a 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go +++ b/vendor/github.com/moby/buildkit/executor/oci/resolvconf.go @@ -2,7 +2,6 @@ package oci import ( "context" - "io/ioutil" "os" "path/filepath" @@ -100,7 +99,7 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity } tmpPath := p + ".tmp" - if err := ioutil.WriteFile(tmpPath, f.Content, 0644); err != nil { + if err := os.WriteFile(tmpPath, f.Content, 0644); err != nil { return "", err } diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go index 262cc50f20..213ebb7366 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go @@ -164,7 +164,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, if !ok { return errors.Errorf("unknown network mode %s", meta.NetMode) } - namespace, err := provider.New() + namespace, err := provider.New(ctx, meta.Hostname) if err != nil { return err } @@ -224,7 +224,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, } defer mount.Unmount(rootFSPath, 0) - defer executor.MountStubsCleaner(rootFSPath, mounts)() + defer executor.MountStubsCleaner(rootFSPath, mounts, meta.RemoveMountStubsRecursive)() uid, gid, sgids, err := oci.GetUser(rootFSPath, meta.User) if err != nil { diff --git a/vendor/github.com/moby/buildkit/executor/stubs.go b/vendor/github.com/moby/buildkit/executor/stubs.go index 2c13b13053..22a8ac1310 100644 --- a/vendor/github.com/moby/buildkit/executor/stubs.go +++ b/vendor/github.com/moby/buildkit/executor/stubs.go @@ -7,9 +7,11 @@ import ( "syscall" "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/util/system" + "github.com/sirupsen/logrus" ) -func MountStubsCleaner(dir string, mounts []Mount) func() { +func MountStubsCleaner(dir string, mounts []Mount, recursive bool) func() { names := []string{"/etc/resolv.conf", "/etc/hosts"} for _, m := range mounts { @@ -28,9 +30,22 @@ func MountStubsCleaner(dir string, mounts []Mount) func() { continue } - _, err = os.Lstat(realPath) - if errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) { + for { + _, err = os.Lstat(realPath) + if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR)) { + break + } paths = append(paths, realPath) + + if !recursive { + break + } + + realPathNext := filepath.Dir(realPath) + if realPath == realPathNext { + break + } + realPath = realPathNext } } @@ -40,10 +55,41 @@ func MountStubsCleaner(dir string, mounts []Mount) func() { if err != nil { continue } - if st.Size() != 0 { + if st.IsDir() { + entries, err := os.ReadDir(p) + if err != nil { + continue + } + if len(entries) != 0 { + continue + } + } else if st.Size() != 0 { continue } - os.Remove(p) + + // Back up the timestamps of the dir for reproducible builds + // https://github.com/moby/buildkit/issues/3148 + dir := filepath.Dir(p) + dirSt, err := os.Stat(dir) + if err != nil { + logrus.WithError(err).Warnf("Failed to stat %q (parent of mount stub %q)", dir, p) + continue + } + mtime := dirSt.ModTime() + atime, err := system.Atime(dirSt) + if err != nil { + 
logrus.WithError(err).Warnf("Failed to stat atime of %q (parent of mount stub %q)", dir, p) + atime = mtime + } + + if err := os.Remove(p); err != nil { + logrus.WithError(err).Warnf("Failed to remove mount stub %q", p) + } + + // Restore the timestamps of the dir + if err := os.Chtimes(dir, atime, mtime); err != nil { + logrus.WithError(err).Warnf("Failed to restore time time mount stub timestamp (os.Chtimes(%q, %v, %v))", dir, atime, mtime) + } } } } diff --git a/vendor/github.com/moby/buildkit/exporter/attestation/filter.go b/vendor/github.com/moby/buildkit/exporter/attestation/filter.go new file mode 100644 index 0000000000..5abc234b87 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/attestation/filter.go @@ -0,0 +1,45 @@ +package attestation + +import ( + "bytes" + + "github.com/moby/buildkit/exporter" +) + +func Filter(attestations []exporter.Attestation, include map[string][]byte, exclude map[string][]byte) []exporter.Attestation { + if len(include) == 0 && len(exclude) == 0 { + return attestations + } + + result := []exporter.Attestation{} + for _, att := range attestations { + meta := att.Metadata + if meta == nil { + meta = map[string][]byte{} + } + + match := true + for k, v := range include { + if !bytes.Equal(meta[k], v) { + match = false + break + } + } + if !match { + continue + } + + for k, v := range exclude { + if bytes.Equal(meta[k], v) { + match = false + break + } + } + if !match { + continue + } + + result = append(result, att) + } + return result +} diff --git a/vendor/github.com/moby/buildkit/exporter/attestation/make.go b/vendor/github.com/moby/buildkit/exporter/attestation/make.go new file mode 100644 index 0000000000..8ed910c1e8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/attestation/make.go @@ -0,0 +1,138 @@ +package attestation + +import ( + "context" + "encoding/json" + "os" + + "github.com/containerd/continuity/fs" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/exporter" + gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/result" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +// ReadAll reads the content of an attestation. +func ReadAll(ctx context.Context, s session.Group, att exporter.Attestation) ([]byte, error) { + var content []byte + if att.ContentFunc != nil { + data, err := att.ContentFunc() + if err != nil { + return nil, err + } + content = data + } else if att.Ref != nil { + mount, err := att.Ref.Mount(ctx, true, s) + if err != nil { + return nil, err + } + lm := snapshot.LocalMounter(mount) + src, err := lm.Mount() + if err != nil { + return nil, err + } + defer lm.Unmount() + + p, err := fs.RootPath(src, att.Path) + if err != nil { + return nil, err + } + content, err = os.ReadFile(p) + if err != nil { + return nil, errors.Wrap(err, "cannot read in-toto attestation") + } + } else { + return nil, errors.New("no available content for attestation") + } + if len(content) == 0 { + content = nil + } + return content, nil +} + +// MakeInTotoStatements iterates over all provided result attestations and +// generates intoto attestation statements. 
+func MakeInTotoStatements(ctx context.Context, s session.Group, attestations []exporter.Attestation, defaultSubjects []intoto.Subject) ([]intoto.Statement, error) { + eg, ctx := errgroup.WithContext(ctx) + statements := make([]intoto.Statement, len(attestations)) + + for i, att := range attestations { + i, att := i, att + eg.Go(func() error { + content, err := ReadAll(ctx, s, att) + if err != nil { + return err + } + + switch att.Kind { + case gatewaypb.AttestationKindInToto: + stmt, err := makeInTotoStatement(ctx, content, att, defaultSubjects) + if err != nil { + return err + } + statements[i] = *stmt + case gatewaypb.AttestationKindBundle: + return errors.New("bundle attestation kind must be un-bundled first") + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + return statements, nil +} + +func makeInTotoStatement(ctx context.Context, content []byte, attestation exporter.Attestation, defaultSubjects []intoto.Subject) (*intoto.Statement, error) { + if len(attestation.InToto.Subjects) == 0 { + attestation.InToto.Subjects = []result.InTotoSubject{{ + Kind: gatewaypb.InTotoSubjectKindSelf, + }} + } + subjects := []intoto.Subject{} + for _, subject := range attestation.InToto.Subjects { + subjectName := "_" + if subject.Name != "" { + subjectName = subject.Name + } + + switch subject.Kind { + case gatewaypb.InTotoSubjectKindSelf: + for _, defaultSubject := range defaultSubjects { + subjectNames := []string{} + subjectNames = append(subjectNames, defaultSubject.Name) + if subjectName != "_" { + subjectNames = append(subjectNames, subjectName) + } + + for _, name := range subjectNames { + subjects = append(subjects, intoto.Subject{ + Name: name, + Digest: defaultSubject.Digest, + }) + } + } + case gatewaypb.InTotoSubjectKindRaw: + subjects = append(subjects, intoto.Subject{ + Name: subjectName, + Digest: result.ToDigestMap(subject.Digest...), + }) + default: + return nil, errors.Errorf("unknown attestation subject type %T", subject) + } + } + + stmt := intoto.Statement{ + StatementHeader: intoto.StatementHeader{ + Type: intoto.StatementInTotoV01, + PredicateType: attestation.InToto.PredicateType, + Subject: subjects, + }, + Predicate: json.RawMessage(content), + } + return &stmt, nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/attestation/unbundle.go b/vendor/github.com/moby/buildkit/exporter/attestation/unbundle.go new file mode 100644 index 0000000000..a2120d7975 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/attestation/unbundle.go @@ -0,0 +1,192 @@ +package attestation + +import ( + "context" + "encoding/json" + "os" + "path" + "strings" + + "github.com/containerd/continuity/fs" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/exporter" + gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/result" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +// Unbundle iterates over all provided result attestations and un-bundles any +// bundled attestations by loading them from the provided refs map. 
+func Unbundle(ctx context.Context, s session.Group, bundled []exporter.Attestation) ([]exporter.Attestation, error) { + if err := Validate(bundled); err != nil { + return nil, err + } + + eg, ctx := errgroup.WithContext(ctx) + unbundled := make([][]exporter.Attestation, len(bundled)) + + for i, att := range bundled { + i, att := i, att + eg.Go(func() error { + switch att.Kind { + case gatewaypb.AttestationKindInToto: + if strings.HasPrefix(att.InToto.PredicateType, "https://slsa.dev/provenance/") { + if att.ContentFunc == nil { + // provenance may only be set buildkit-side using ContentFunc + return errors.New("frontend may not set provenance attestations") + } + } + unbundled[i] = append(unbundled[i], att) + case gatewaypb.AttestationKindBundle: + if att.ContentFunc != nil { + return errors.New("attestation bundle cannot have callback") + } + if att.Ref == nil { + return errors.Errorf("no ref provided for attestation bundle") + } + + mount, err := att.Ref.Mount(ctx, true, s) + if err != nil { + return err + } + lm := snapshot.LocalMounter(mount) + src, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() + + atts, err := unbundle(ctx, src, att) + if err != nil { + return err + } + for _, att := range atts { + if strings.HasPrefix(att.InToto.PredicateType, "https://slsa.dev/provenance/") { + return errors.New("frontend may not bundle provenance attestations") + } + } + unbundled[i] = append(unbundled[i], atts...) + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + + var joined []exporter.Attestation + for _, atts := range unbundled { + joined = append(joined, atts...) + } + joined = sort(joined) + + if err := Validate(joined); err != nil { + return nil, err + } + return joined, nil +} + +func sort(atts []exporter.Attestation) []exporter.Attestation { + isCore := make([]bool, len(atts)) + for i, att := range atts { + name, ok := att.Metadata[result.AttestationSBOMCore] + if !ok { + continue + } + if n, _, _ := strings.Cut(att.Path, "."); n != string(name) { + continue + } + isCore[i] = true + } + + result := make([]exporter.Attestation, 0, len(atts)) + for i, att := range atts { + if isCore[i] { + result = append(result, att) + } + } + for i, att := range atts { + if !isCore[i] { + result = append(result, att) + } + } + return result +} + +func unbundle(ctx context.Context, root string, bundle exporter.Attestation) ([]exporter.Attestation, error) { + dir, err := fs.RootPath(root, bundle.Path) + if err != nil { + return nil, err + } + entries, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + + var unbundled []exporter.Attestation + for _, entry := range entries { + p, err := fs.RootPath(dir, entry.Name()) + if err != nil { + return nil, err + } + f, err := os.Open(p) + if err != nil { + return nil, err + } + dec := json.NewDecoder(f) + var stmt intoto.Statement + if err := dec.Decode(&stmt); err != nil { + return nil, errors.Wrap(err, "cannot decode in-toto statement") + } + if bundle.InToto.PredicateType != "" && stmt.PredicateType != bundle.InToto.PredicateType { + return nil, errors.Errorf("bundle entry %s does not match required predicate type %s", stmt.PredicateType, bundle.InToto.PredicateType) + } + + predicate, err := json.Marshal(stmt.Predicate) + if err != nil { + return nil, err + } + + subjects := make([]result.InTotoSubject, len(stmt.Subject)) + for i, subject := range stmt.Subject { + subjects[i] = result.InTotoSubject{ + Kind: gatewaypb.InTotoSubjectKindRaw, + Name: subject.Name, + Digest: 
result.FromDigestMap(subject.Digest), + } + } + unbundled = append(unbundled, exporter.Attestation{ + Kind: gatewaypb.AttestationKindInToto, + Metadata: bundle.Metadata, + Path: path.Join(bundle.Path, entry.Name()), + ContentFunc: func() ([]byte, error) { return predicate, nil }, + InToto: result.InTotoAttestation{ + PredicateType: stmt.PredicateType, + Subjects: subjects, + }, + }) + } + return unbundled, nil +} + +func Validate(atts []exporter.Attestation) error { + for _, att := range atts { + if err := validate(att); err != nil { + return err + } + } + return nil +} + +func validate(att exporter.Attestation) error { + if att.Kind != gatewaypb.AttestationKindBundle && att.Path == "" { + return errors.New("attestation does not have set path") + } + if att.Ref == nil && att.ContentFunc == nil { + return errors.New("attestation does not have available content") + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/annotations.go b/vendor/github.com/moby/buildkit/exporter/containerimage/annotations.go new file mode 100644 index 0000000000..cdb5e94509 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/annotations.go @@ -0,0 +1,139 @@ +package containerimage + +import ( + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/exporter/containerimage/exptypes" +) + +type Annotations struct { + Index map[string]string + IndexDescriptor map[string]string + Manifest map[string]string + ManifestDescriptor map[string]string +} + +// AnnotationsGroup is a map of annotations keyed by the reference key +type AnnotationsGroup map[string]*Annotations + +func ParseAnnotations(data map[string][]byte) (AnnotationsGroup, map[string][]byte, error) { + ag := make(AnnotationsGroup) + rest := make(map[string][]byte) + + for k, v := range data { + a, ok, err := exptypes.ParseAnnotationKey(k) + if !ok { + rest[k] = v + continue + } + if err != nil { + return nil, nil, err + } + + p := a.PlatformString() + + if ag[p] == nil { + ag[p] = &Annotations{ + IndexDescriptor: make(map[string]string), + Index: make(map[string]string), + Manifest: make(map[string]string), + ManifestDescriptor: make(map[string]string), + } + } + + switch a.Type { + case exptypes.AnnotationIndex: + ag[p].Index[a.Key] = string(v) + case exptypes.AnnotationIndexDescriptor: + ag[p].IndexDescriptor[a.Key] = string(v) + case exptypes.AnnotationManifest: + ag[p].Manifest[a.Key] = string(v) + case exptypes.AnnotationManifestDescriptor: + ag[p].ManifestDescriptor[a.Key] = string(v) + default: + return nil, nil, errors.Errorf("unrecognized annotation type %s", a.Type) + } + } + return ag, rest, nil +} + +func (ag AnnotationsGroup) Platform(p *ocispecs.Platform) *Annotations { + res := &Annotations{ + IndexDescriptor: make(map[string]string), + Index: make(map[string]string), + Manifest: make(map[string]string), + ManifestDescriptor: make(map[string]string), + } + + ps := []string{""} + if p != nil { + ps = append(ps, platforms.Format(*p)) + } + + for _, a := range ag { + for k, v := range a.Index { + res.Index[k] = v + } + for k, v := range a.IndexDescriptor { + res.IndexDescriptor[k] = v + } + } + for _, pk := range ps { + if _, ok := ag[pk]; !ok { + continue + } + + for k, v := range ag[pk].Manifest { + res.Manifest[k] = v + } + for k, v := range ag[pk].ManifestDescriptor { + res.ManifestDescriptor[k] = v + } + } + return res +} + +func (ag AnnotationsGroup) Merge(other 
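ParseAnnotations consumes only the exporter attributes that exptypes.ParseAnnotationKey recognizes and passes everything else through in rest. Assuming the annotation-<type>[<platform>].<key> attribute grammar, typical inputs look like:

exporterAttrs := map[string]string{
	"annotation-index.org.opencontainers.image.source":     "https://github.com/example/app",
	"annotation-manifest[linux/amd64].org.example.variant": "amd64 build", // platform-scoped
}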
AnnotationsGroup) AnnotationsGroup { + if other == nil { + return ag + } + if ag == nil { + ag = make(AnnotationsGroup) + } + + for k, v := range other { + ag[k] = ag[k].merge(v) + } + return ag +} + +func (a *Annotations) merge(other *Annotations) *Annotations { + if other == nil { + return a + } + if a == nil { + a = &Annotations{ + IndexDescriptor: make(map[string]string), + Index: make(map[string]string), + Manifest: make(map[string]string), + ManifestDescriptor: make(map[string]string), + } + } + + for k, v := range other.Index { + a.Index[k] = v + } + for k, v := range other.IndexDescriptor { + a.IndexDescriptor[k] = v + } + for k, v := range other.Manifest { + a.Manifest[k] = v + } + for k, v := range other.ManifestDescriptor { + a.ManifestDescriptor[k] = v + } + + return a +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go b/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go new file mode 100644 index 0000000000..782c187330 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/attestations.go @@ -0,0 +1,212 @@ +package containerimage + +import ( + "bytes" + "context" + "fmt" + "io/fs" + "strings" + + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/attestation" + gatewaypb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/result" + "github.com/moby/buildkit/version" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + spdx_json "github.com/spdx/tools-golang/json" + "github.com/spdx/tools-golang/spdx/common" + spdx "github.com/spdx/tools-golang/spdx/v2_3" +) + +var intotoPlatform ocispecs.Platform = ocispecs.Platform{ + Architecture: "unknown", + OS: "unknown", +} + +// supplementSBOM modifies SPDX attestations to include the file layers +func supplementSBOM(ctx context.Context, s session.Group, target cache.ImmutableRef, targetRemote *solver.Remote, att exporter.Attestation) (exporter.Attestation, error) { + if att.Kind != gatewaypb.AttestationKindInToto { + return att, nil + } + if att.InToto.PredicateType != intoto.PredicateSPDX { + return att, nil + } + name, ok := att.Metadata[result.AttestationSBOMCore] + if !ok { + return att, nil + } + if n, _, _ := strings.Cut(att.Path, "."); n != string(name) { + return att, nil + } + + content, err := attestation.ReadAll(ctx, s, att) + if err != nil { + return att, err + } + + doc, err := decodeSPDX(content) + if err != nil { + // ignore decoding error + return att, nil + } + + layers, err := newFileLayerFinder(target, targetRemote) + if err != nil { + return att, err + } + modifyFile := func(f *spdx.File) error { + if f == nil { + // Skip over nil entries - this is likely a bug in the SPDX parser, + // but we shouldn't accidentally panic if we encounter it. + return nil + } + + if f.FileComment != "" { + // Skip over files that already have a comment - since the data is + // unstructured, we can't correctly overwrite this field without + // possibly breaking some scanner functionality. 
+ return nil + } + + _, desc, err := layers.find(ctx, s, f.FileName) + if err != nil { + if !errors.Is(err, fs.ErrNotExist) { + return err + } + return nil + } + f.FileComment = fmt.Sprintf("layerID: %s", desc.Digest.String()) + return nil + } + for _, f := range doc.Files { + if err := modifyFile(f); err != nil { + return att, err + } + } + for _, p := range doc.Packages { + for _, f := range p.Files { + if err := modifyFile(f); err != nil { + return att, err + } + } + } + + if doc.CreationInfo == nil { + doc.CreationInfo = &spdx.CreationInfo{} + } + doc.CreationInfo.Creators = append(doc.CreationInfo.Creators, common.Creator{ + CreatorType: "Tool", + Creator: "buildkit-" + version.Version, + }) + + content, err = encodeSPDX(doc) + if err != nil { + return att, err + } + + return exporter.Attestation{ + Kind: att.Kind, + Path: att.Path, + ContentFunc: func() ([]byte, error) { return content, nil }, + InToto: att.InToto, + }, nil +} + +func decodeSPDX(dt []byte) (s *spdx.Document, err error) { + doc, err := spdx_json.Load2_3(bytes.NewReader(dt)) + if err != nil { + return nil, errors.Wrap(err, "unable to decode spdx") + } + if doc == nil { + return nil, errors.New("decoding produced empty spdx document") + } + return doc, nil +} + +func encodeSPDX(s *spdx.Document) (dt []byte, err error) { + w := bytes.NewBuffer(nil) + err = spdx_json.Save2_3(s, w) + if err != nil { + return nil, errors.Wrap(err, "unable to encode spdx") + } + return w.Bytes(), nil +} + +// fileLayerFinder finds the layer that contains a file, with caching to avoid +// repeated FileList lookups. +type fileLayerFinder struct { + pending []fileLayerEntry + cache map[string]fileLayerEntry +} + +type fileLayerEntry struct { + ref cache.ImmutableRef + desc ocispecs.Descriptor +} + +func newFileLayerFinder(target cache.ImmutableRef, remote *solver.Remote) (fileLayerFinder, error) { + chain := target.LayerChain() + descs := remote.Descriptors + if len(chain) != len(descs) { + return fileLayerFinder{}, errors.New("layer chain and descriptor list are not the same length") + } + + pending := make([]fileLayerEntry, len(chain)) + for i, ref := range chain { + pending[i] = fileLayerEntry{ref: ref, desc: descs[i]} + } + return fileLayerFinder{ + pending: pending, + cache: map[string]fileLayerEntry{}, + }, nil +} + +// find finds the layer that contains the file, returning the ImmutableRef and +// descriptor for the layer. If the file searched for was deleted, find returns +// the layer that created the file, not the one that deleted it. +// +// find is not concurrency-safe. 
+func (c *fileLayerFinder) find(ctx context.Context, s session.Group, filename string) (cache.ImmutableRef, *ocispecs.Descriptor, error) { + // return immediately if we've already found the layer containing filename + if cache, ok := c.cache[filename]; ok { + return cache.ref, &cache.desc, nil + } + + for len(c.pending) > 0 { + // pop the last entry off the pending list (we traverse the layers backwards) + pending := c.pending[len(c.pending)-1] + files, err := pending.ref.FileList(ctx, s) + if err != nil { + return nil, nil, err + } + c.pending = c.pending[:len(c.pending)-1] + + found := false + for _, f := range files { + if strings.HasPrefix(f, ".wh.") { + // skip whiteout files, we only care about file creations + continue + } + + // add all files in this layer to the cache + if _, ok := c.cache[f]; ok { + continue + } + c.cache[f] = pending + + // if we found the file, return the layer (but finish populating the cache first) + if f == filename { + found = true + } + } + if found { + return pending.ref, &pending.desc, nil + } + } + return nil, nil, fs.ErrNotExist +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/export.go b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go new file mode 100644 index 0000000000..55eaf3ff58 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go @@ -0,0 +1,488 @@ +package containerimage + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/containerd/rootfs" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/cache" + cacheconfig "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/push" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + keyPush = "push" + keyPushByDigest = "push-by-digest" + keyInsecure = "registry.insecure" + keyUnpack = "unpack" + keyDanglingPrefix = "dangling-name-prefix" + keyNameCanonical = "name-canonical" + keyStore = "store" + + // keyUnsafeInternalStoreAllowIncomplete should only be used for tests. This option allows exporting an image to the image store + // even when some blobs are missing from the content store. Some integration tests for lazyref behaviour depend on this option. + // Ignored when store=false.
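// Editor's aside (illustrative, not part of the vendored patch): the keys
// above arrive through the attrs map handed to Resolve below, e.g.
//
//	opt := map[string]string{
//		"name":   "docker.io/library/app:latest", // consumed by ImageCommitOpts.Load
//		"push":   "true",
//		"unpack": "true",
//	}
//	inst, err := exp.Resolve(ctx, opt) // exp is a hypothetical exporter.Exporter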
+ keyUnsafeInternalStoreAllowIncomplete = "unsafe-internal-store-allow-incomplete" +) + +type Opt struct { + SessionManager *session.Manager + ImageWriter *ImageWriter + Images images.Store + RegistryHosts docker.RegistryHosts + LeaseManager leases.Manager +} + +type imageExporter struct { + opt Opt +} + +// New returns a new containerimage exporter instance that supports exporting +// to an image store and pushing the image to registry. +// This exporter supports following values in returned kv map: +// - containerimage.digest - The digest of the root manifest for the image. +func New(opt Opt) (exporter.Exporter, error) { + im := &imageExporter{opt: opt} + return im, nil +} + +func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { + i := &imageExporterInstance{ + imageExporter: e, + opts: ImageCommitOpts{ + RefCfg: cacheconfig.RefConfig{ + Compression: compression.New(compression.Default), + }, + BuildInfo: true, + ForceInlineAttestations: true, + }, + store: true, + } + + opt, err := i.opts.Load(opt) + if err != nil { + return nil, err + } + + for k, v := range opt { + switch k { + case keyPush: + if v == "" { + i.push = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.push = b + case keyPushByDigest: + if v == "" { + i.pushByDigest = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.pushByDigest = b + case keyInsecure: + if v == "" { + i.insecure = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.insecure = b + case keyUnpack: + if v == "" { + i.unpack = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.unpack = b + case keyStore: + if v == "" { + i.store = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.store = b + case keyUnsafeInternalStoreAllowIncomplete: + if v == "" { + i.storeAllowIncomplete = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.storeAllowIncomplete = b + case keyDanglingPrefix: + i.danglingPrefix = v + case keyNameCanonical: + if v == "" { + i.nameCanonical = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.nameCanonical = b + default: + if i.meta == nil { + i.meta = make(map[string][]byte) + } + i.meta[k] = []byte(v) + } + } + return i, nil +} + +type imageExporterInstance struct { + *imageExporter + opts ImageCommitOpts + push bool + pushByDigest bool + unpack bool + store bool + storeAllowIncomplete bool + insecure bool + nameCanonical bool + danglingPrefix string + meta map[string][]byte +} + +func (e *imageExporterInstance) Name() string { + return "exporting to image" +} + +func (e *imageExporterInstance) Config() *exporter.Config { + return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression) +} + +func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) { + if src.Metadata == nil { + src.Metadata 
= make(map[string][]byte) + } + for k, v := range e.meta { + src.Metadata[k] = v + } + + opts := e.opts + as, _, err := ParseAnnotations(src.Metadata) + if err != nil { + return nil, nil, err + } + opts.Annotations = opts.Annotations.Merge(as) + + ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return nil, nil, err + } + defer func() { + if descref == nil { + done(context.TODO()) + } + }() + + desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, &opts) + if err != nil { + return nil, nil, err + } + defer func() { + if err == nil { + descref = NewDescriptorReference(*desc, done) + } + }() + + resp := make(map[string]string) + + if n, ok := src.Metadata["image.name"]; e.opts.ImageName == "*" && ok { + e.opts.ImageName = string(n) + } + + nameCanonical := e.nameCanonical + if e.opts.ImageName == "" && e.danglingPrefix != "" { + e.opts.ImageName = e.danglingPrefix + "@" + desc.Digest.String() + nameCanonical = false + } + + if e.opts.ImageName != "" { + targetNames := strings.Split(e.opts.ImageName, ",") + for _, targetName := range targetNames { + if e.opt.Images != nil && e.store { + tagDone := progress.OneOff(ctx, "naming to "+targetName) + img := images.Image{ + Target: *desc, + CreatedAt: time.Now(), + } + sfx := []string{""} + if nameCanonical { + sfx = append(sfx, "@"+desc.Digest.String()) + } + for _, sfx := range sfx { + img.Name = targetName + sfx + if _, err := e.opt.Images.Update(ctx, img); err != nil { + if !errors.Is(err, errdefs.ErrNotFound) { + return nil, nil, tagDone(err) + } + + if _, err := e.opt.Images.Create(ctx, img); err != nil { + return nil, nil, tagDone(err) + } + } + } + tagDone(nil) + + if src.Ref != nil && e.unpack { + if err := e.unpackImage(ctx, img, src, session.NewGroup(sessionID)); err != nil { + return nil, nil, err + } + } + + if !e.storeAllowIncomplete { + if src.Ref != nil { + remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + if err != nil { + return nil, nil, err + } + remote := remotes[0] + if unlazier, ok := remote.Provider.(cache.Unlazier); ok { + if err := unlazier.Unlazy(ctx); err != nil { + return nil, nil, err + } + } + } + if len(src.Refs) > 0 { + for _, r := range src.Refs { + remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + if err != nil { + return nil, nil, err + } + remote := remotes[0] + if unlazier, ok := remote.Provider.(cache.Unlazier); ok { + if err := unlazier.Unlazy(ctx); err != nil { + return nil, nil, err + } + } + } + } + } + } + if e.push { + err := e.pushImage(ctx, src, sessionID, targetName, desc.Digest) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to push %v", targetName) + } + } + } + resp["image.name"] = e.opts.ImageName + } + + resp[exptypes.ExporterImageDigestKey] = desc.Digest.String() + if v, ok := desc.Annotations[exptypes.ExporterConfigDigestKey]; ok { + resp[exptypes.ExporterImageConfigDigestKey] = v + delete(desc.Annotations, exptypes.ExporterConfigDigestKey) + } + + dtdesc, err := json.Marshal(desc) + if err != nil { + return nil, nil, err + } + resp[exptypes.ExporterImageDescriptorKey] = base64.StdEncoding.EncodeToString(dtdesc) + + return resp, nil, nil +} + +func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Source, sessionID string, targetName string, dgst digest.Digest) error { + annotations := map[digest.Digest]map[string]string{} + mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) + 
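// Editor's sketch (client-side decoding, not part of the vendored patch):
// the descriptor that Export above publishes in its response map can be
// recovered with:
//
//	dt, err := base64.StdEncoding.DecodeString(resp[exptypes.ExporterImageDescriptorKey])
//	if err == nil {
//		var desc ocispecs.Descriptor
//		err = json.Unmarshal(dt, &desc)
//	}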
if src.Ref != nil { + remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + if err != nil { + return err + } + remote := remotes[0] + for _, desc := range remote.Descriptors { + mprovider.Add(desc.Digest, remote.Provider) + addAnnotations(annotations, desc) + } + } + if len(src.Refs) > 0 { + for _, r := range src.Refs { + remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID)) + if err != nil { + return err + } + remote := remotes[0] + for _, desc := range remote.Descriptors { + mprovider.Add(desc.Digest, remote.Provider) + addAnnotations(annotations, desc) + } + } + } + + ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto") + return push.Push(ctx, e.opt.SessionManager, sessionID, mprovider, e.opt.ImageWriter.ContentStore(), dgst, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations) +} + +func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Image, src *exporter.Source, s session.Group) (err0 error) { + unpackDone := progress.OneOff(ctx, "unpacking to "+img.Name) + defer func() { + unpackDone(err0) + }() + + var ( + contentStore = e.opt.ImageWriter.ContentStore() + applier = e.opt.ImageWriter.Applier() + snapshotter = e.opt.ImageWriter.Snapshotter() + ) + + // fetch manifest by default platform + manifest, err := images.Manifest(ctx, contentStore, img.Target, platforms.Default()) + if err != nil { + return err + } + + topLayerRef := src.Ref + if len(src.Refs) > 0 { + if r, ok := src.Refs[defaultPlatform()]; ok { + topLayerRef = r + } else { + return errors.Errorf("no reference for default platform %s", defaultPlatform()) + } + } + + remotes, err := topLayerRef.GetRemotes(ctx, true, e.opts.RefCfg, false, s) + if err != nil { + return err + } + remote := remotes[0] + + // ensure the content for each layer exists locally in case any are lazy + if unlazier, ok := remote.Provider.(cache.Unlazier); ok { + if err := unlazier.Unlazy(ctx); err != nil { + return err + } + } + + layers, err := getLayers(ctx, remote.Descriptors, manifest) + if err != nil { + return err + } + + // get containerd snapshotter + ctrdSnapshotter, release := snapshot.NewContainerdSnapshotter(snapshotter) + defer release() + + var chain []digest.Digest + for _, layer := range layers { + if _, err := rootfs.ApplyLayer(ctx, layer, chain, ctrdSnapshotter, applier); err != nil { + return err + } + chain = append(chain, layer.Diff.Digest) + } + + var ( + keyGCLabel = fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", snapshotter.Name()) + valueGCLabel = identity.ChainID(chain).String() + ) + + cinfo := content.Info{ + Digest: manifest.Config.Digest, + Labels: map[string]string{keyGCLabel: valueGCLabel}, + } + _, err = contentStore.Update(ctx, cinfo, fmt.Sprintf("labels.%s", keyGCLabel)) + return err +} + +func getLayers(ctx context.Context, descs []ocispecs.Descriptor, manifest ocispecs.Manifest) ([]rootfs.Layer, error) { + if len(descs) != len(manifest.Layers) { + return nil, errors.Errorf("mismatched image rootfs and manifest layers") + } + + layers := make([]rootfs.Layer, len(descs)) + for i, desc := range descs { + layers[i].Diff = ocispecs.Descriptor{ + MediaType: ocispecs.MediaTypeImageLayer, + Digest: digest.Digest(desc.Annotations["containerd.io/uncompressed"]), + } + layers[i].Blob = manifest.Layers[i] + } + return layers, nil +} + +func addAnnotations(m map[digest.Digest]map[string]string, desc ocispecs.Descriptor) { + if desc.Annotations == nil { + return + } + a, ok := 
m[desc.Digest] + if !ok { + m[desc.Digest] = desc.Annotations + return + } + for k, v := range desc.Annotations { + a[k] = v + } +} + +func defaultPlatform() string { + // Use normalized platform string to avoid the mismatch with platform options which + // are normalized using platforms.Normalize() + return platforms.Format(platforms.Normalize(platforms.DefaultSpec())) +} + +func NewDescriptorReference(desc ocispecs.Descriptor, release func(context.Context) error) exporter.DescriptorReference { + return &descriptorReference{ + desc: desc, + release: release, + } +} + +type descriptorReference struct { + desc ocispecs.Descriptor + release func(context.Context) error +} + +func (d *descriptorReference) Descriptor() ocispecs.Descriptor { + return d.desc +} + +func (d *descriptorReference) Release() error { + return d.release(context.TODO()) +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/annotations.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/annotations.go new file mode 100644 index 0000000000..e7697d916a --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/annotations.go @@ -0,0 +1,115 @@ +package exptypes + +import ( + "fmt" + "regexp" + + "github.com/containerd/containerd/platforms" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + AnnotationIndex = "index" + AnnotationIndexDescriptor = "index-descriptor" + AnnotationManifest = "manifest" + AnnotationManifestDescriptor = "manifest-descriptor" +) + +var ( + keyAnnotationRegexp = regexp.MustCompile(`^annotation(?:-([a-z-]+))?(?:\[([A-Za-z0-9_/-]+)\])?\.(\S+)$`) +) + +type AnnotationKey struct { + Type string + Platform *ocispecs.Platform + Key string +} + +func (k AnnotationKey) String() string { + prefix := "annotation" + + switch k.Type { + case "": + case AnnotationManifest, AnnotationManifestDescriptor: + prefix += fmt.Sprintf("-%s", k.Type) + if p := k.PlatformString(); p != "" { + prefix += fmt.Sprintf("[%s]", p) + } + case AnnotationIndex, AnnotationIndexDescriptor: + prefix += "-" + k.Type + default: + panic("unknown annotation type") + } + + return fmt.Sprintf("%s.%s", prefix, k.Key) +} + +func (k AnnotationKey) PlatformString() string { + if k.Platform == nil { + return "" + } + return platforms.Format(*k.Platform) +} + +func AnnotationIndexKey(key string) string { + return AnnotationKey{ + Type: AnnotationIndex, + Key: key, + }.String() +} + +func AnnotationIndexDescriptorKey(key string) string { + return AnnotationKey{ + Type: AnnotationIndexDescriptor, + Key: key, + }.String() +} + +func AnnotationManifestKey(p *ocispecs.Platform, key string) string { + return AnnotationKey{ + Type: AnnotationManifest, + Platform: p, + Key: key, + }.String() +} + +func AnnotationManifestDescriptorKey(p *ocispecs.Platform, key string) string { + return AnnotationKey{ + Type: AnnotationManifestDescriptor, + Platform: p, + Key: key, + }.String() +} + +func ParseAnnotationKey(result string) (AnnotationKey, bool, error) { + groups := keyAnnotationRegexp.FindStringSubmatch(result) + if groups == nil { + return AnnotationKey{}, false, nil + } + + tp, platform, key := groups[1], groups[2], groups[3] + switch tp { + case AnnotationIndex, AnnotationIndexDescriptor, AnnotationManifest, AnnotationManifestDescriptor: + case "": + tp = AnnotationManifest + default: + return AnnotationKey{}, true, errors.Errorf("unrecognized annotation type %s", tp) + } + + var ociPlatform *ocispecs.Platform + if platform != "" { + 
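// Editor's note (examples, not part of the vendored patch): keys accepted by
// keyAnnotationRegexp above include
//
//	annotation.org.opencontainers.image.title             (manifest, all platforms)
//	annotation-manifest[linux/amd64].com.example.note     (manifest, one platform)
//	annotation-index.org.opencontainers.image.created     (image index)
//
// A bare annotation.<key> defaults to the manifest type, per the switch above.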
p, err := platforms.Parse(platform) + if err != nil { + return AnnotationKey{}, true, err + } + ociPlatform = &p + } + + annotation := AnnotationKey{ + Type: tp, + Platform: ociPlatform, + Key: key, + } + return annotation, true, nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go new file mode 100644 index 0000000000..f77cd3f525 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go @@ -0,0 +1,56 @@ +package exptypes + +import ( + "encoding/json" + "fmt" + + "github.com/containerd/containerd/platforms" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func ParsePlatforms(meta map[string][]byte) (Platforms, error) { + if platformsBytes, ok := meta[ExporterPlatformsKey]; ok { + var ps Platforms + if len(platformsBytes) > 0 { + if err := json.Unmarshal(platformsBytes, &ps); err != nil { + return Platforms{}, errors.Wrapf(err, "failed to parse platforms passed to provenance processor") + } + } + return ps, nil + } + + p := platforms.DefaultSpec() + if imgConfig, ok := meta[ExporterImageConfigKey]; ok { + var img ocispecs.Image + err := json.Unmarshal(imgConfig, &img) + if err != nil { + return Platforms{}, err + } + + if img.OS != "" && img.Architecture != "" { + p = ocispecs.Platform{ + Architecture: img.Architecture, + OS: img.OS, + OSVersion: img.OSVersion, + OSFeatures: img.OSFeatures, + Variant: img.Variant, + } + } + } + p = platforms.Normalize(p) + pk := platforms.Format(p) + ps := Platforms{ + Platforms: []Platform{{ID: pk, Platform: p}}, + } + return ps, nil +} + +func ParseKey(meta map[string][]byte, key string, p Platform) []byte { + if v, ok := meta[fmt.Sprintf("%s/%s", key, p.ID)]; ok { + return v + } else if v, ok := meta[key]; ok { + return v + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go index a18d660a5c..f22344c86a 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go @@ -13,8 +13,17 @@ const ( ExporterInlineCache = "containerimage.inlinecache" ExporterBuildInfo = "containerimage.buildinfo" ExporterPlatformsKey = "refs.platforms" + ExporterEpochKey = "source.date.epoch" ) +// KnownRefMetadataKeys are the subset of exporter keys that can be suffixed by +// a platform to become platform specific +var KnownRefMetadataKeys = []string{ + ExporterImageConfigKey, + ExporterInlineCache, + ExporterBuildInfo, +} + type Platforms struct { Platforms []Platform } diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go b/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go new file mode 100644 index 0000000000..a35d811d55 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go @@ -0,0 +1,52 @@ +package image + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. 
+ // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before retries start to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// ImageConfig is a Docker-compatible config for an image +type ImageConfig struct { + ocispecs.ImageConfig + + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + + // NetworkDisabled bool `json:",omitempty"` // Is network disabled + // MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata defined in the image's Dockerfile + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + ocispecs.Image + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go b/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go new file mode 100644 index 0000000000..c12d86127e --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/opts.go @@ -0,0 +1,160 @@ +package containerimage + +import ( + "strconv" + "time" + + cacheconfig "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/exporter/util/epoch" + "github.com/moby/buildkit/util/compression" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + keyImageName = "name" + keyLayerCompression = "compression" + keyCompressionLevel = "compression-level" + keyForceCompression = "force-compression" + keyOCITypes = "oci-mediatypes" + keyBuildInfo = "buildinfo" + keyBuildInfoAttrs = "buildinfo-attrs" + keyForceInlineAttestations = "attestation-inline" +
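// Editor's aside (illustrative attrs, not part of the vendored patch):
// ImageCommitOpts.Load below consumes entries such as
//
//	"compression":       "zstd"
//	"compression-level": "3"
//	"oci-mediatypes":    "true"
//	"buildinfo-attrs":   "false"
//
// and hands any unrecognized keys back to the caller via the rest map.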
+ // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was + // already found to use a non-distributable media type. + // When this option is not set, the exporter will change the media type of the layer to a distributable one. + keyPreferNondistLayers = "prefer-nondist-layers" +) + +type ImageCommitOpts struct { + ImageName string + RefCfg cacheconfig.RefConfig + OCITypes bool + BuildInfo bool + BuildInfoAttrs bool + Annotations AnnotationsGroup + Epoch *time.Time + + ForceInlineAttestations bool // force inline attestations to be attached +} + +func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) { + rest := make(map[string]string) + + as, optb, err := ParseAnnotations(toBytesMap(opt)) + if err != nil { + return nil, err + } + opt = toStringMap(optb) + + c.Epoch, opt, err = epoch.ParseExporterAttrs(opt) + if err != nil { + return nil, err + } + + for k, v := range opt { + var err error + switch k { + case keyImageName: + c.ImageName = v + case keyLayerCompression: + c.RefCfg.Compression.Type, err = compression.Parse(v) + case keyCompressionLevel: + ii, err2 := strconv.ParseInt(v, 10, 64) + if err2 != nil { + err = errors.Wrapf(err2, "non-int value %s specified for %s", v, k) + break + } + v := int(ii) + c.RefCfg.Compression.Level = &v + case keyForceCompression: + err = parseBoolWithDefault(&c.RefCfg.Compression.Force, k, v, true) + case keyOCITypes: + err = parseBoolWithDefault(&c.OCITypes, k, v, true) + case keyBuildInfo: + err = parseBoolWithDefault(&c.BuildInfo, k, v, true) + case keyBuildInfoAttrs: + err = parseBoolWithDefault(&c.BuildInfoAttrs, k, v, false) + case keyForceInlineAttestations: + err = parseBool(&c.ForceInlineAttestations, k, v) + case keyPreferNondistLayers: + err = parseBool(&c.RefCfg.PreferNonDistributable, k, v) + default: + rest[k] = v + } + + if err != nil { + return nil, err + } + } + + if c.RefCfg.Compression.Type.OnlySupportOCITypes() { + c.EnableOCITypes(c.RefCfg.Compression.Type.String()) + } + + if c.RefCfg.Compression.Type.NeedsForceCompression() { + c.EnableForceCompression(c.RefCfg.Compression.Type.String()) + } + + c.Annotations = c.Annotations.Merge(as) + + return rest, nil +} + +func (c *ImageCommitOpts) EnableOCITypes(reason string) { + if !c.OCITypes { + message := "forcibly turning on oci-mediatype mode" + if reason != "" { + message += " for " + reason + } + logrus.Warn(message) + + c.OCITypes = true + } +} + +func (c *ImageCommitOpts) EnableForceCompression(reason string) { + if !c.RefCfg.Compression.Force { + message := "forcibly turning on force-compression mode" + if reason != "" { + message += " for " + reason + } + logrus.Warn(message) + + c.RefCfg.Compression.Force = true + } +} + +func parseBool(dest *bool, key string, value string) error { + b, err := strconv.ParseBool(value) + if err != nil { + return errors.Wrapf(err, "non-bool value specified for %s", key) + } + *dest = b + return nil +} + +func parseBoolWithDefault(dest *bool, key string, value string, defaultValue bool) error { + if value == "" { + *dest = defaultValue + return nil + } + return parseBool(dest, key, value) +} + +func toBytesMap(m map[string]string) map[string][]byte { + result := make(map[string][]byte) + for k, v := range m { + result[k] = []byte(v) + } + return result +} + +func toStringMap(m map[string][]byte) map[string]string { + result := make(map[string]string) + for k, v := range m { + result[k] = string(v) + } + return result +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/patch.go b/vendor/github.com/moby/buildkit/exporter/containerimage/patch.go new file mode 100644 index 0000000000..93866b018b --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/patch.go @@ -0,0 +1,18 @@ +//go:build !nydus +// +build 
!nydus + +package containerimage + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func patchImageLayers(ctx context.Context, remote *solver.Remote, history []ocispecs.History, ref cache.ImmutableRef, opts *ImageCommitOpts, sg session.Group) (*solver.Remote, []ocispecs.History, error) { + remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes) + return remote, history, nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/patch_nydus.go b/vendor/github.com/moby/buildkit/exporter/containerimage/patch_nydus.go new file mode 100644 index 0000000000..3a9336a66f --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/patch_nydus.go @@ -0,0 +1,35 @@ +//go:build nydus +// +build nydus + +package containerimage + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// patchImageLayers appends an extra nydus bootstrap layer +// to the manifest of nydus image, normalizes layers and +// history. The nydus bootstrap layer represents the whole +// metadata of filesystem view for the entire image. +func patchImageLayers(ctx context.Context, remote *solver.Remote, history []ocispecs.History, ref cache.ImmutableRef, opts *ImageCommitOpts, sg session.Group) (*solver.Remote, []ocispecs.History, error) { + if opts.RefCfg.Compression.Type != compression.Nydus { + remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes) + return remote, history, nil + } + + desc, err := cache.MergeNydus(ctx, ref, opts.RefCfg.Compression, sg) + if err != nil { + return nil, nil, errors.Wrap(err, "merge nydus layer") + } + remote.Descriptors = append(remote.Descriptors, *desc) + + remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes) + return remote, history, nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go new file mode 100644 index 0000000000..068d86958f --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go @@ -0,0 +1,835 @@ +package containerimage + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/cache" + cacheconfig "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/attestation" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/util/epoch" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/result" + attestationTypes "github.com/moby/buildkit/util/attestation" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/buildinfo" + binfotypes "github.com/moby/buildkit/util/buildinfo/types" + "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/progress" + 
"github.com/moby/buildkit/util/purl" + "github.com/moby/buildkit/util/system" + "github.com/moby/buildkit/util/tracing" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" +) + +type WriterOpt struct { + Snapshotter snapshot.Snapshotter + ContentStore content.Store + Applier diff.Applier + Differ diff.Comparer +} + +func NewImageWriter(opt WriterOpt) (*ImageWriter, error) { + return &ImageWriter{opt: opt}, nil +} + +type ImageWriter struct { + opt WriterOpt +} + +func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, sessionID string, opts *ImageCommitOpts) (*ocispecs.Descriptor, error) { + if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; len(inp.Refs) > 0 && !ok { + return nil, errors.Errorf("unable to export multiple refs, missing platforms mapping") + } + + isMap := len(inp.Refs) > 0 + + ps, err := exptypes.ParsePlatforms(inp.Metadata) + if err != nil { + return nil, err + } + + if !isMap { + // enable index if we need to include attestations + for _, p := range ps.Platforms { + if atts, ok := inp.Attestations[p.ID]; ok { + if !opts.ForceInlineAttestations { + // if we don't need force inline attestations (for oci + // exporter), filter them out + atts = attestation.Filter(atts, nil, map[string][]byte{ + result.AttestationInlineOnlyKey: []byte(strconv.FormatBool(true)), + }) + } + if len(atts) > 0 { + isMap = true + break + } + } + } + } + if opts.Epoch == nil { + if tm, ok, err := epoch.ParseSource(inp); err != nil { + return nil, err + } else if ok { + opts.Epoch = tm + } + } + + for pk, a := range opts.Annotations { + if pk != "" { + if _, ok := inp.FindRef(pk); !ok { + return nil, errors.Errorf("invalid annotation: no platform %s found in source", pk) + } + } + if len(a.Index)+len(a.IndexDescriptor)+len(a.ManifestDescriptor) > 0 { + opts.EnableOCITypes("annotations") + } + } + + if !isMap { + if len(ps.Platforms) > 1 { + return nil, errors.Errorf("cannot export multiple platforms without multi-platform enabled") + } + + var ref cache.ImmutableRef + var p exptypes.Platform + if len(ps.Platforms) > 0 { + p = ps.Platforms[0] + if r, ok := inp.FindRef(p.ID); ok { + ref = r + } + } else { + ref = inp.Ref + } + + remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), ref) + if err != nil { + return nil, err + } + + var dtbi []byte + if opts.BuildInfo { + if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{ + RemoveAttrs: !opts.BuildInfoAttrs, + }); err != nil { + return nil, err + } + } + + annotations := opts.Annotations.Platform(nil) + if len(annotations.Index) > 0 || len(annotations.IndexDescriptor) > 0 { + return nil, errors.Errorf("index annotations not supported for single platform export") + } + + config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p) + inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p) + mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, &remotes[0], annotations, inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID)) + if err != nil { + return nil, err + } + if mfstDesc.Annotations == nil { + mfstDesc.Annotations = make(map[string]string) + } + if len(ps.Platforms) == 1 { + mfstDesc.Platform = &ps.Platforms[0].Platform + } + 
mfstDesc.Annotations[exptypes.ExporterConfigDigestKey] = configDesc.Digest.String() + + return mfstDesc, nil + } + + if len(inp.Attestations) > 0 { + opts.EnableOCITypes("attestations") + } + + refs := make([]cache.ImmutableRef, 0, len(inp.Refs)) + remotesMap := make(map[string]int, len(inp.Refs)) + for _, p := range ps.Platforms { + r, ok := inp.FindRef(p.ID) + if !ok { + return nil, errors.Errorf("failed to find ref for ID %s", p.ID) + } + remotesMap[p.ID] = len(refs) + refs = append(refs, r) + } + + remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), refs...) + if err != nil { + return nil, err + } + + idx := struct { + // MediaType is reserved in the OCI spec but + // excluded from go types. + MediaType string `json:"mediaType,omitempty"` + + ocispecs.Index + }{ + MediaType: ocispecs.MediaTypeImageIndex, + Index: ocispecs.Index{ + Annotations: opts.Annotations.Platform(nil).Index, + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + }, + } + + if !opts.OCITypes { + idx.MediaType = images.MediaTypeDockerSchema2ManifestList + } + + labels := map[string]string{} + + var attestationManifests []ocispecs.Descriptor + + for i, p := range ps.Platforms { + r, ok := inp.FindRef(p.ID) + if !ok { + return nil, errors.Errorf("failed to find ref for ID %s", p.ID) + } + config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p) + inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p) + + var dtbi []byte + if opts.BuildInfo { + if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{ + RemoveAttrs: !opts.BuildInfoAttrs, + }); err != nil { + return nil, err + } + } + + remote := &remotes[remotesMap[p.ID]] + if remote == nil { + remote = &solver.Remote{ + Provider: ic.opt.ContentStore, + } + } + + desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID)) + if err != nil { + return nil, err + } + dp := p.Platform + desc.Platform = &dp + idx.Manifests = append(idx.Manifests, *desc) + + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = desc.Digest.String() + + if attestations, ok := inp.Attestations[p.ID]; ok { + attestations, err := attestation.Unbundle(ctx, session.NewGroup(sessionID), attestations) + if err != nil { + return nil, err + } + + eg, ctx2 := errgroup.WithContext(ctx) + for i, att := range attestations { + i, att := i, att + eg.Go(func() error { + att, err := supplementSBOM(ctx2, session.NewGroup(sessionID), r, remote, att) + if err != nil { + return err + } + attestations[i] = att + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + + var defaultSubjects []intoto.Subject + for _, name := range strings.Split(opts.ImageName, ",") { + if name == "" { + continue + } + pl, err := purl.RefToPURL(name, &p.Platform) + if err != nil { + return nil, err + } + defaultSubjects = append(defaultSubjects, intoto.Subject{ + Name: pl, + Digest: result.ToDigestMap(desc.Digest), + }) + } + stmts, err := attestation.MakeInTotoStatements(ctx, session.NewGroup(sessionID), attestations, defaultSubjects) + if err != nil { + return nil, err + } + + desc, err := ic.commitAttestationsManifest(ctx, opts, p, desc.Digest.String(), stmts) + if err != nil { + return nil, err + } + desc.Platform = &intotoPlatform + attestationManifests = append(attestationManifests, *desc) + } + } + + for i, mfst := range attestationManifests { + 
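// Editor's note (worked example, not part of the vendored patch): for two
// platforms plus one attestation manifest, the index blob written below ends
// up with GC labels
//
//	containerd.io/gc.ref.content.0 -> linux/amd64 manifest digest
//	containerd.io/gc.ref.content.1 -> linux/arm64 manifest digest
//	containerd.io/gc.ref.content.2 -> attestation manifest digest
//
// so containerd's garbage collector keeps every referenced manifest alive.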
idx.Manifests = append(idx.Manifests, mfst) + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", len(ps.Platforms)+i)] = mfst.Digest.String() + } + + idxBytes, err := json.MarshalIndent(idx, "", " ") + if err != nil { + return nil, errors.Wrap(err, "failed to marshal index") + } + + idxDigest := digest.FromBytes(idxBytes) + idxDesc := ocispecs.Descriptor{ + Digest: idxDigest, + Size: int64(len(idxBytes)), + MediaType: idx.MediaType, + Annotations: opts.Annotations.Platform(nil).IndexDescriptor, + } + idxDone := progress.OneOff(ctx, "exporting manifest list "+idxDigest.String()) + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, idxDigest.String(), bytes.NewReader(idxBytes), idxDesc, content.WithLabels(labels)); err != nil { + return nil, idxDone(errors.Wrapf(err, "error writing manifest list blob %s", idxDigest)) + } + idxDone(nil) + + return &idxDesc, nil +} + +func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefConfig, s session.Group, refs ...cache.ImmutableRef) ([]solver.Remote, error) { + attr := []attribute.KeyValue{ + attribute.String("exportLayers.compressionType", refCfg.Compression.Type.String()), + attribute.Bool("exportLayers.forceCompression", refCfg.Compression.Force), + } + if refCfg.Compression.Level != nil { + attr = append(attr, attribute.Int("exportLayers.compressionLevel", *refCfg.Compression.Level)) + } + span, ctx := tracing.StartSpan(ctx, "export layers", trace.WithAttributes(attr...)) + + eg, ctx := errgroup.WithContext(ctx) + layersDone := progress.OneOff(ctx, "exporting layers") + + out := make([]solver.Remote, len(refs)) + + for i, ref := range refs { + func(i int, ref cache.ImmutableRef) { + if ref == nil { + return + } + eg.Go(func() error { + remotes, err := ref.GetRemotes(ctx, true, refCfg, false, s) + if err != nil { + return err + } + remote := remotes[0] + out[i] = *remote + return nil + }) + }(i, ref) + } + + err := layersDone(eg.Wait()) + tracing.FinishWithError(span, err) + return out, err +} + +func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, buildInfo []byte, epoch *time.Time, sg session.Group) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { + if len(config) == 0 { + var err error + config, err = defaultImageConfig() + if err != nil { + return nil, nil, err + } + } + + history, err := parseHistoryFromConfig(config) + if err != nil { + return nil, nil, err + } + + remote, history, err = patchImageLayers(ctx, remote, history, ref, opts, sg) + if err != nil { + return nil, nil, err + } + + config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, buildInfo, epoch) + if err != nil { + return nil, nil, err + } + + var ( + configDigest = digest.FromBytes(config) + manifestType = ocispecs.MediaTypeImageManifest + configType = ocispecs.MediaTypeImageConfig + ) + + // Use docker media types for older Docker versions and registries + if !opts.OCITypes { + manifestType = images.MediaTypeDockerSchema2Manifest + configType = images.MediaTypeDockerSchema2Config + } + + mfst := struct { + // MediaType is reserved in the OCI spec but + // excluded from go types. 
+ MediaType string `json:"mediaType,omitempty"` + + ocispecs.Manifest + }{ + MediaType: manifestType, + Manifest: ocispecs.Manifest{ + Annotations: annotations.Manifest, + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + }, + }, + } + + labels := map[string]string{ + "containerd.io/gc.ref.content.0": configDigest.String(), + } + + for _, desc := range remote.Descriptors { + desc.Annotations = RemoveInternalLayerAnnotations(desc.Annotations, opts.OCITypes) + mfst.Layers = append(mfst.Layers, desc) + } + + mfstJSON, err := json.MarshalIndent(mfst, "", " ") + if err != nil { + return nil, nil, errors.Wrap(err, "failed to marshal manifest") + } + + mfstDigest := digest.FromBytes(mfstJSON) + mfstDesc := ocispecs.Descriptor{ + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + } + mfstDone := progress.OneOff(ctx, "exporting manifest "+mfstDigest.String()) + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, mfstDigest.String(), bytes.NewReader(mfstJSON), mfstDesc, content.WithLabels((labels))); err != nil { + return nil, nil, mfstDone(errors.Wrapf(err, "error writing manifest blob %s", mfstDigest)) + } + mfstDone(nil) + + configDesc := ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + } + configDone := progress.OneOff(ctx, "exporting config "+configDigest.String()) + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, configDigest.String(), bytes.NewReader(config), configDesc); err != nil { + return nil, nil, configDone(errors.Wrap(err, "error writing config blob")) + } + configDone(nil) + + return &ocispecs.Descriptor{ + Annotations: annotations.ManifestDescriptor, + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + MediaType: manifestType, + }, &configDesc, nil +} + +func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *ImageCommitOpts, p exptypes.Platform, target string, statements []intoto.Statement) (*ocispecs.Descriptor, error) { + var ( + manifestType = ocispecs.MediaTypeImageManifest + configType = ocispecs.MediaTypeImageConfig + ) + if !opts.OCITypes { + manifestType = images.MediaTypeDockerSchema2Manifest + configType = images.MediaTypeDockerSchema2Config + } + + layers := make([]ocispecs.Descriptor, len(statements)) + for i, statement := range statements { + i, statement := i, statement + + data, err := json.Marshal(statement) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal attestation") + } + digest := digest.FromBytes(data) + desc := ocispecs.Descriptor{ + MediaType: attestationTypes.MediaTypeDockerSchema2AttestationType, + Digest: digest, + Size: int64(len(data)), + Annotations: map[string]string{ + "containerd.io/uncompressed": digest.String(), + "in-toto.io/predicate-type": statement.PredicateType, + }, + } + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, digest.String(), bytes.NewReader(data), desc); err != nil { + return nil, errors.Wrapf(err, "error writing data blob %s", digest) + } + layers[i] = desc + } + + config, err := attestationsConfig(layers) + if err != nil { + return nil, err + } + configDigest := digest.FromBytes(config) + configDesc := ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + } + + mfst := struct { + // MediaType is reserved in the OCI spec but + // excluded from go types. 
+ MediaType string `json:"mediaType,omitempty"` + + ocispecs.Manifest + }{ + MediaType: manifestType, + Manifest: ocispecs.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + }, + }, + } + + labels := map[string]string{ + "containerd.io/gc.ref.content.0": configDigest.String(), + } + for i, desc := range layers { + desc.Annotations = RemoveInternalLayerAnnotations(desc.Annotations, opts.OCITypes) + mfst.Layers = append(mfst.Layers, desc) + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String() + } + + mfstJSON, err := json.MarshalIndent(mfst, "", " ") + if err != nil { + return nil, errors.Wrap(err, "failed to marshal manifest") + } + + mfstDigest := digest.FromBytes(mfstJSON) + mfstDesc := ocispecs.Descriptor{ + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + } + + done := progress.OneOff(ctx, "exporting attestation manifest "+mfstDigest.String()) + if err := content.WriteBlob(ctx, ic.opt.ContentStore, mfstDigest.String(), bytes.NewReader(mfstJSON), mfstDesc, content.WithLabels((labels))); err != nil { + return nil, done(errors.Wrapf(err, "error writing manifest blob %s", mfstDigest)) + } + if err := content.WriteBlob(ctx, ic.opt.ContentStore, configDigest.String(), bytes.NewReader(config), configDesc); err != nil { + return nil, done(errors.Wrap(err, "error writing config blob")) + } + done(nil) + + return &ocispecs.Descriptor{ + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + MediaType: manifestType, + Annotations: map[string]string{ + attestationTypes.DockerAnnotationReferenceType: attestationTypes.DockerAnnotationReferenceTypeDefault, + attestationTypes.DockerAnnotationReferenceDigest: target, + }, + }, nil +} + +func (ic *ImageWriter) ContentStore() content.Store { + return ic.opt.ContentStore +} + +func (ic *ImageWriter) Snapshotter() snapshot.Snapshotter { + return ic.opt.Snapshotter +} + +func (ic *ImageWriter) Applier() diff.Applier { + return ic.opt.Applier +} + +func defaultImageConfig() ([]byte, error) { + pl := platforms.Normalize(platforms.DefaultSpec()) + + img := ocispecs.Image{ + Architecture: pl.Architecture, + OS: pl.OS, + Variant: pl.Variant, + } + img.RootFS.Type = "layers" + img.Config.WorkingDir = "/" + img.Config.Env = []string{"PATH=" + system.DefaultPathEnv(pl.OS)} + dt, err := json.Marshal(img) + return dt, errors.Wrap(err, "failed to create empty image config") +} + +func attestationsConfig(layers []ocispecs.Descriptor) ([]byte, error) { + img := ocispecs.Image{ + Architecture: intotoPlatform.Architecture, + OS: intotoPlatform.OS, + OSVersion: intotoPlatform.OSVersion, + OSFeatures: intotoPlatform.OSFeatures, + Variant: intotoPlatform.Variant, + } + img.RootFS.Type = "layers" + for _, layer := range layers { + img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, digest.Digest(layer.Annotations["containerd.io/uncompressed"])) + } + dt, err := json.Marshal(img) + return dt, errors.Wrap(err, "failed to create attestations image config") +} + +func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) { + var config struct { + History []ocispecs.History + } + if err := json.Unmarshal(dt, &config); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal history from config") + } + return config.History, nil +} + +func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, buildInfo []byte, epoch *time.Time) ([]byte, error) { + m := 
map[string]json.RawMessage{} + if err := json.Unmarshal(dt, &m); err != nil { + return nil, errors.Wrap(err, "failed to parse image config for patch") + } + + var rootFS ocispecs.RootFS + rootFS.Type = "layers" + for _, desc := range descs { + rootFS.DiffIDs = append(rootFS.DiffIDs, digest.Digest(desc.Annotations["containerd.io/uncompressed"])) + } + dt, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal rootfs") + } + m["rootfs"] = dt + + if epoch != nil { + for i, h := range history { + if h.Created == nil || h.Created.After(*epoch) { + history[i].Created = epoch + } + } + } + + dt, err = json.Marshal(history) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal history") + } + m["history"] = dt + + // if epoch is set then clamp creation time + if v, ok := m["created"]; ok && epoch != nil { + var tm time.Time + if err := json.Unmarshal(v, &tm); err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal creation time %q", m["created"]) + } + if tm.After(*epoch) { + dt, err = json.Marshal(&epoch) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal creation time") + } + m["created"] = dt + } + } + + if _, ok := m["created"]; !ok { + var tm *time.Time + for _, h := range history { + if h.Created != nil { + tm = h.Created + } + } + dt, err = json.Marshal(&tm) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal creation time") + } + m["created"] = dt + } + + if cache != nil { + dt, err := json.Marshal(cache) + if err != nil { + return nil, err + } + m["moby.buildkit.cache.v0"] = dt + } + + if buildInfo != nil { + dt, err := json.Marshal(buildInfo) + if err != nil { + return nil, err + } + m[binfotypes.ImageConfigField] = dt + } else { + delete(m, binfotypes.ImageConfigField) + } + + dt, err = json.Marshal(m) + return dt, errors.Wrap(err, "failed to marshal config after patch") +} + +func normalizeLayersAndHistory(ctx context.Context, remote *solver.Remote, history []ocispecs.History, ref cache.ImmutableRef, oci bool) (*solver.Remote, []ocispecs.History) { + refMeta := getRefMetadata(ref, len(remote.Descriptors)) + + var historyLayers int + for _, h := range history { + if !h.EmptyLayer { + historyLayers++ + } + } + + if historyLayers > len(remote.Descriptors) { + // this case shouldn't happen, but if it does, force-set history layers to empty + // from the bottom + bklog.G(ctx).Warn("invalid image config with unaccounted layers") + historyCopy := make([]ocispecs.History, 0, len(history)) + var l int + for _, h := range history { + if l >= len(remote.Descriptors) { + h.EmptyLayer = true + } + if !h.EmptyLayer { + l++ + } + historyCopy = append(historyCopy, h) + } + history = historyCopy + } + + if len(remote.Descriptors) > historyLayers { + // some history items are missing; add them based on the ref metadata + for _, md := range refMeta[historyLayers:] { + history = append(history, ocispecs.History{ + Created: md.createdAt, + CreatedBy: md.description, + Comment: "buildkit.exporter.image.v0", + }) + } + } + + var layerIndex int + for i, h := range history { + if !h.EmptyLayer { + if h.Created == nil { + h.Created = refMeta[layerIndex].createdAt + } + layerIndex++ + } + history[i] = h + } + + // Find the first new layer time. Otherwise, the history item for a first + // metadata command would get the creation time of a base image layer. + // If there is no such layer, fall back to the last layer that has a timestamp.
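// Editor's note (worked example, not part of the vendored patch): given
// history Created times [nil, t1, nil, t2], the first pass below settles on
// t1 (the first timestamp following a gap) and the second pass fills the
// gaps, yielding [t1, t1, t1, t2]; metadata-only commands therefore never
// inherit a base-image layer's creation time.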
+ var created *time.Time + var noCreatedTime bool + for _, h := range history { + if h.Created != nil { + created = h.Created + if noCreatedTime { + break + } + } else { + noCreatedTime = true + } + } + + // Fill in created times for all history items to be either the first new + // layer time or the previous layer. + noCreatedTime = false + for i, h := range history { + if h.Created != nil { + if noCreatedTime { + created = h.Created + } + } else { + noCreatedTime = true + h.Created = created + } + history[i] = h + } + + // convert between oci and docker media types (or vice versa) if needed + remote.Descriptors = compression.ConvertAllLayerMediaTypes(oci, remote.Descriptors...) + + return remote, history +} + +func RemoveInternalLayerAnnotations(in map[string]string, oci bool) map[string]string { + if len(in) == 0 || !oci { + return nil + } + m := make(map[string]string, len(in)) + for k, v := range in { + // oci supports annotations but don't export internal annotations + switch k { + case "containerd.io/uncompressed", "buildkit/createdat": + continue + default: + if strings.HasPrefix(k, "containerd.io/distribution.source.") { + continue + } + m[k] = v + } + } + return m +} + +type refMetadata struct { + description string + createdAt *time.Time +} + +func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata { + if ref == nil { + return make([]refMetadata, limit) + } + + layerChain := ref.LayerChain() + defer layerChain.Release(context.TODO()) + + if limit < len(layerChain) { + layerChain = layerChain[len(layerChain)-limit:] + } + + metas := make([]refMetadata, len(layerChain)) + for i, layer := range layerChain { + meta := &metas[i] + + if description := layer.GetDescription(); description != "" { + meta.description = description + } else { + meta.description = "created by buildkit" // shouldn't be shown but don't fail build + } + + createdAt := layer.GetCreatedAt() + meta.createdAt = &createdAt + } + return metas +} diff --git a/vendor/github.com/moby/buildkit/exporter/exporter.go b/vendor/github.com/moby/buildkit/exporter/exporter.go index 610481b710..0e7d8d14f2 100644 --- a/vendor/github.com/moby/buildkit/exporter/exporter.go +++ b/vendor/github.com/moby/buildkit/exporter/exporter.go @@ -4,25 +4,49 @@ import ( "context" "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/solver/result" "github.com/moby/buildkit/util/compression" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) +type Source = result.Result[cache.ImmutableRef] + +type Attestation = result.Attestation[cache.ImmutableRef] + type Exporter interface { Resolve(context.Context, map[string]string) (ExporterInstance, error) } type ExporterInstance interface { Name() string - Config() Config - Export(ctx context.Context, src Source, sessionID string) (map[string]string, error) + Config() *Config + Export(ctx context.Context, src *Source, sessionID string) (map[string]string, DescriptorReference, error) } -type Source struct { - Ref cache.ImmutableRef - Refs map[string]cache.ImmutableRef - Metadata map[string][]byte +type DescriptorReference interface { + Release() error + Descriptor() ocispecs.Descriptor } type Config struct { - Compression compression.Config + // Make the field private in case it is initialized with nil compression.Type + compression compression.Config +} + +func NewConfig() *Config { + return &Config{ + compression: compression.Config{ + Type: compression.Default, + }, + } +} + +func NewConfigWithCompression(comp compression.Config) *Config { + return &Config{ + 
compression: comp, + } +} + +func (c *Config) Compression() compression.Config { + return c.compression } diff --git a/vendor/github.com/moby/buildkit/exporter/local/export.go b/vendor/github.com/moby/buildkit/exporter/local/export.go index 5daa4aa426..7d08b172e0 100644 --- a/vendor/github.com/moby/buildkit/exporter/local/export.go +++ b/vendor/github.com/moby/buildkit/exporter/local/export.go @@ -2,24 +2,28 @@ package local import ( "context" - "io/ioutil" "os" "strings" "time" - "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/util/progress" + "github.com/pkg/errors" "github.com/tonistiigi/fsutil" fstypes "github.com/tonistiigi/fsutil/types" "golang.org/x/sync/errgroup" "golang.org/x/time/rate" ) +const ( + keyAttestationPrefix = "attestation-prefix" +) + type Opt struct { SessionManager *session.Manager } @@ -35,93 +39,103 @@ func New(opt Opt) (exporter.Exporter, error) { } func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { - return &localExporterInstance{localExporter: e}, nil -} - -type localExporterInstance struct { - *localExporter -} - -func (e *localExporterInstance) Name() string { - return "exporting to client" -} - -func (e *localExporter) Config() exporter.Config { - return exporter.Config{} -} - -func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) { - timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - - caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) + tm, _, err := epoch.ParseExporterAttrs(opt) if err != nil { return nil, err } + i := &localExporterInstance{ + localExporter: e, + opts: CreateFSOpts{ + Epoch: tm, + }, + } + + for k, v := range opt { + switch k { + case keyAttestationPrefix: + i.opts.AttestationPrefix = v + } + } + + return i, nil +} + +type localExporterInstance struct { + *localExporter + opts CreateFSOpts +} + +func (e *localExporterInstance) Name() string { + return "exporting to client directory" +} + +func (e *localExporter) Config() *exporter.Config { + return exporter.NewConfig() +} + +func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) { + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + if e.opts.Epoch == nil { + if tm, ok, err := epoch.ParseSource(inp); err != nil { + return nil, nil, err + } else if ok { + e.opts.Epoch = tm + } + } + + caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) + if err != nil { + return nil, nil, err + } + isMap := len(inp.Refs) > 0 - export := func(ctx context.Context, k string, ref cache.ImmutableRef) func() error { + if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; isMap && !ok { + return nil, nil, errors.Errorf("unable to export multiple refs, missing platforms mapping") + } + p, err := exptypes.ParsePlatforms(inp.Metadata) + if err != nil { + return nil, nil, err + } + + if !isMap && len(p.Platforms) > 1 { + return nil, nil, errors.Errorf("unable to export multiple platforms without map") + } + + now := time.Now().Truncate(time.Second) + + export := func(ctx 
context.Context, k string, ref cache.ImmutableRef, attestations []exporter.Attestation) func() error { return func() error { - var src string - var err error - var idmap *idtools.IdentityMapping - if ref == nil { - src, err = ioutil.TempDir("", "buildkit") - if err != nil { - return err - } - defer os.RemoveAll(src) - } else { - mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID)) - if err != nil { - return err - } - - lm := snapshot.LocalMounter(mount) - - src, err = lm.Mount() - if err != nil { - return err - } - - idmap = mount.IdentityMapping() - - defer lm.Unmount() + outputFS, cleanup, err := CreateFS(ctx, sessionID, k, ref, attestations, now, e.opts) + if err != nil { + return err + } + if cleanup != nil { + defer cleanup() } - walkOpt := &fsutil.WalkOpt{} - - if idmap != nil { - walkOpt.Map = func(p string, st *fstypes.Stat) bool { - uid, gid, err := idmap.ToContainer(idtools.Identity{ - UID: int(st.Uid), - GID: int(st.Gid), - }) - if err != nil { - return false - } - st.Uid = uint32(uid) - st.Gid = uint32(gid) - return true - } - } - - fs := fsutil.NewFS(src, walkOpt) lbl := "copying files" if isMap { lbl += " " + k - fs, err = fsutil.SubDirFS([]fsutil.Dir{{FS: fs, Stat: fstypes.Stat{ + st := fstypes.Stat{ Mode: uint32(os.ModeDir | 0755), Path: strings.Replace(k, "/", "_", -1), - }}}) + } + if e.opts.Epoch != nil { + st.ModTime = e.opts.Epoch.UnixNano() + } + + outputFS, err = fsutil.SubDirFS([]fsutil.Dir{{FS: outputFS, Stat: st}}) if err != nil { return err } } - progress := newProgressHandler(ctx, lbl) - if err := filesync.CopyToCaller(ctx, fs, caller, progress); err != nil { + progress := NewProgressHandler(ctx, lbl) + if err := filesync.CopyToCaller(ctx, outputFS, caller, progress); err != nil { return err } return nil @@ -130,21 +144,25 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, eg, ctx := errgroup.WithContext(ctx) - if isMap { - for k, ref := range inp.Refs { - eg.Go(export(ctx, k, ref)) + if len(p.Platforms) > 0 { + for _, p := range p.Platforms { + r, ok := inp.FindRef(p.ID) + if !ok { + return nil, nil, errors.Errorf("failed to find ref for ID %s", p.ID) + } + eg.Go(export(ctx, p.ID, r, inp.Attestations[p.ID])) } } else { - eg.Go(export(ctx, "", inp.Ref)) + eg.Go(export(ctx, "", inp.Ref, nil)) } if err := eg.Wait(); err != nil { - return nil, err + return nil, nil, err } - return nil, nil + return nil, nil, nil } -func newProgressHandler(ctx context.Context, id string) func(int, bool) { +func NewProgressHandler(ctx context.Context, id string) func(int, bool) { limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1) pw, _, _ := progress.NewFromContext(ctx) now := time.Now() diff --git a/vendor/github.com/moby/buildkit/exporter/local/fs.go b/vendor/github.com/moby/buildkit/exporter/local/fs.go new file mode 100644 index 0000000000..c5a524aae3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/local/fs.go @@ -0,0 +1,161 @@ +package local + +import ( + "context" + "encoding/json" + "io" + "io/fs" + "os" + "path" + "strconv" + "time" + + "github.com/docker/docker/pkg/idtools" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/attestation" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/result" + "github.com/moby/buildkit/util/staticfs" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + 
"github.com/tonistiigi/fsutil" + fstypes "github.com/tonistiigi/fsutil/types" +) + +type CreateFSOpts struct { + Epoch *time.Time + AttestationPrefix string +} + +func CreateFS(ctx context.Context, sessionID string, k string, ref cache.ImmutableRef, attestations []exporter.Attestation, defaultTime time.Time, opt CreateFSOpts) (fsutil.FS, func() error, error) { + var cleanup func() error + var src string + var err error + var idmap *idtools.IdentityMapping + if ref == nil { + src, err = os.MkdirTemp("", "buildkit") + if err != nil { + return nil, nil, err + } + cleanup = func() error { return os.RemoveAll(src) } + } else { + mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID)) + if err != nil { + return nil, nil, err + } + + lm := snapshot.LocalMounter(mount) + + src, err = lm.Mount() + if err != nil { + return nil, nil, err + } + + idmap = mount.IdentityMapping() + + cleanup = lm.Unmount + } + + walkOpt := &fsutil.WalkOpt{} + var idMapFunc func(p string, st *fstypes.Stat) fsutil.MapResult + + if idmap != nil { + idMapFunc = func(p string, st *fstypes.Stat) fsutil.MapResult { + uid, gid, err := idmap.ToContainer(idtools.Identity{ + UID: int(st.Uid), + GID: int(st.Gid), + }) + if err != nil { + return fsutil.MapResultExclude + } + st.Uid = uint32(uid) + st.Gid = uint32(gid) + return fsutil.MapResultKeep + } + } + + walkOpt.Map = func(p string, st *fstypes.Stat) fsutil.MapResult { + res := fsutil.MapResultKeep + if idMapFunc != nil { + res = idMapFunc(p, st) + } + if opt.Epoch != nil { + st.ModTime = opt.Epoch.UnixNano() + } + return res + } + + outputFS := fsutil.NewFS(src, walkOpt) + attestations = attestation.Filter(attestations, nil, map[string][]byte{ + result.AttestationInlineOnlyKey: []byte(strconv.FormatBool(true)), + }) + attestations, err = attestation.Unbundle(ctx, session.NewGroup(sessionID), attestations) + if err != nil { + return nil, nil, err + } + if len(attestations) > 0 { + subjects := []intoto.Subject{} + err = outputFS.Walk(ctx, func(path string, info fs.FileInfo, err error) error { + if err != nil { + return err + } + if !info.Mode().IsRegular() { + return nil + } + f, err := outputFS.Open(path) + if err != nil { + return err + } + defer f.Close() + d := digest.Canonical.Digester() + if _, err := io.Copy(d.Hash(), f); err != nil { + return err + } + subjects = append(subjects, intoto.Subject{ + Name: path, + Digest: result.ToDigestMap(d.Digest()), + }) + return nil + }) + if err != nil { + return nil, nil, err + } + + stmts, err := attestation.MakeInTotoStatements(ctx, session.NewGroup(sessionID), attestations, subjects) + if err != nil { + return nil, nil, err + } + stmtFS := staticfs.NewFS() + + names := map[string]struct{}{} + for i, stmt := range stmts { + dt, err := json.MarshalIndent(stmt, "", " ") + if err != nil { + return nil, nil, errors.Wrap(err, "failed to marshal attestation") + } + + name := opt.AttestationPrefix + path.Base(attestations[i].Path) + if _, ok := names[name]; ok { + return nil, nil, errors.Errorf("duplicate attestation path name %s", name) + } + names[name] = struct{}{} + + st := fstypes.Stat{ + Mode: 0600, + Path: name, + ModTime: defaultTime.UnixNano(), + } + if opt.Epoch != nil { + st.ModTime = opt.Epoch.UnixNano() + } + stmtFS.Add(name, st, dt) + } + + outputFS = staticfs.NewMergeFS(outputFS, stmtFS) + } + + return outputFS, cleanup, nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/tar/export.go b/vendor/github.com/moby/buildkit/exporter/tar/export.go index 0febefd0b0..4d136c89c1 100644 --- 
a/vendor/github.com/moby/buildkit/exporter/tar/export.go +++ b/vendor/github.com/moby/buildkit/exporter/tar/export.go @@ -2,18 +2,18 @@ package local import ( "context" - "io/ioutil" "os" "strconv" "strings" "time" - "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/exporter/local" + "github.com/moby/buildkit/exporter/util/epoch" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/util/progress" "github.com/pkg/errors" "github.com/tonistiigi/fsutil" @@ -21,6 +21,8 @@ import ( ) const ( + attestationPrefixKey = "attestation-prefix" + // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was // already found to use a non-distributable media type. // When this option is not set, the exporter will change the media type of the layer to a distributable one. @@ -44,13 +46,23 @@ func New(opt Opt) (exporter.Exporter, error) { func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { li := &localExporterInstance{localExporter: e} - v, ok := opt[preferNondistLayersKey] - if ok { - b, err := strconv.ParseBool(v) - if err != nil { - return nil, errors.Wrapf(err, "non-bool value for %s: %s", preferNondistLayersKey, v) + tm, opt, err := epoch.ParseExporterAttrs(opt) + if err != nil { + return nil, err + } + li.opts.Epoch = tm + + for k, v := range opt { + switch k { + case preferNondistLayersKey: + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value for %s: %s", preferNondistLayersKey, v) + } + li.preferNonDist = b + case attestationPrefixKey: + li.opts.AttestationPrefix = v } - li.preferNonDist = b } return li, nil @@ -58,19 +70,20 @@ func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exp type localExporterInstance struct { *localExporter + opts local.CreateFSOpts preferNonDist bool } func (e *localExporterInstance) Name() string { - return "exporting to client" + return "exporting to client tarball" } -func (e *localExporterInstance) Config() exporter.Config { - return exporter.Config{} +func (e *localExporterInstance) Config() *exporter.Config { + return exporter.NewConfig() } -func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) { - var defers []func() +func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) { + var defers []func() error defer func() { for i := len(defers) - 1; i >= 0; i-- { @@ -78,80 +91,79 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, } }() - getDir := func(ctx context.Context, k string, ref cache.ImmutableRef) (*fsutil.Dir, error) { - var src string - var err error - var idmap *idtools.IdentityMapping - if ref == nil { - src, err = ioutil.TempDir("", "buildkit") - if err != nil { - return nil, err - } - defers = append(defers, func() { os.RemoveAll(src) }) - } else { - mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID)) - if err != nil { - return nil, err - } + if e.opts.Epoch == nil { + if tm, ok, err := epoch.ParseSource(inp); err != nil { + return nil, nil, err + } else if ok { + e.opts.Epoch = tm + } + } - lm := 
snapshot.LocalMounter(mount) + now := time.Now().Truncate(time.Second) - src, err = lm.Mount() - if err != nil { - return nil, err - } - - idmap = mount.IdentityMapping() - - defers = append(defers, func() { lm.Unmount() }) + getDir := func(ctx context.Context, k string, ref cache.ImmutableRef, attestations []exporter.Attestation) (*fsutil.Dir, error) { + outputFS, cleanup, err := local.CreateFS(ctx, sessionID, k, ref, attestations, now, e.opts) + if err != nil { + return nil, err + } + if cleanup != nil { + defers = append(defers, cleanup) } - walkOpt := &fsutil.WalkOpt{} - - if idmap != nil { - walkOpt.Map = func(p string, st *fstypes.Stat) bool { - uid, gid, err := idmap.ToContainer(idtools.Identity{ - UID: int(st.Uid), - GID: int(st.Gid), - }) - if err != nil { - return false - } - st.Uid = uint32(uid) - st.Gid = uint32(gid) - return true - } + st := fstypes.Stat{ + Mode: uint32(os.ModeDir | 0755), + Path: strings.Replace(k, "/", "_", -1), + } + if e.opts.Epoch != nil { + st.ModTime = e.opts.Epoch.UnixNano() } return &fsutil.Dir{ - FS: fsutil.NewFS(src, walkOpt), - Stat: fstypes.Stat{ - Mode: uint32(os.ModeDir | 0755), - Path: strings.Replace(k, "/", "_", -1), - }, + FS: outputFS, + Stat: st, }, nil } + isMap := len(inp.Refs) > 0 + if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; isMap && !ok { + return nil, nil, errors.Errorf("unable to export multiple refs, missing platforms mapping") + } + p, err := exptypes.ParsePlatforms(inp.Metadata) + if err != nil { + return nil, nil, err + } + if !isMap && len(p.Platforms) > 1 { + return nil, nil, errors.Errorf("unable to export multiple platforms without map") + } + var fs fsutil.FS - if len(inp.Refs) > 0 { - dirs := make([]fsutil.Dir, 0, len(inp.Refs)) - for k, ref := range inp.Refs { - d, err := getDir(ctx, k, ref) + if len(p.Platforms) > 0 { + dirs := make([]fsutil.Dir, 0, len(p.Platforms)) + for _, p := range p.Platforms { + r, ok := inp.FindRef(p.ID) + if !ok { + return nil, nil, errors.Errorf("failed to find ref for ID %s", p.ID) + } + d, err := getDir(ctx, p.ID, r, inp.Attestations[p.ID]) if err != nil { - return nil, err + return nil, nil, err } dirs = append(dirs, *d) } - var err error - fs, err = fsutil.SubDirFS(dirs) - if err != nil { - return nil, err + if isMap { + var err error + fs, err = fsutil.SubDirFS(dirs) + if err != nil { + return nil, nil, err + } + } else { + fs = dirs[0].FS } } else { - d, err := getDir(ctx, "", inp.Ref) + d, err := getDir(ctx, "", inp.Ref, nil) if err != nil { - return nil, err + return nil, nil, err } fs = d.FS } @@ -161,34 +173,17 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) if err != nil { - return nil, err + return nil, nil, err } w, err := filesync.CopyFileWriter(ctx, nil, caller) if err != nil { - return nil, err + return nil, nil, err } - report := oneOffProgress(ctx, "sending tarball") + report := progress.OneOff(ctx, "sending tarball") if err := fsutil.WriteTar(ctx, fs, w); err != nil { w.Close() - return nil, report(err) - } - return nil, report(w.Close()) -} - -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err + return nil, nil, report(err) } + return nil, nil, 
report(w.Close()) } diff --git a/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go b/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go new file mode 100644 index 0000000000..9d581ed913 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/util/epoch/parse.go @@ -0,0 +1,65 @@ +package epoch + +import ( + "strconv" + "time" + + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/pkg/errors" +) + +const ( + frontendSourceDateEpochArg = "build-arg:SOURCE_DATE_EPOCH" + + KeySourceDateEpoch = "source-date-epoch" +) + +func ParseBuildArgs(opt map[string]string) (string, bool) { + v, ok := opt[frontendSourceDateEpochArg] + return v, ok +} + +func ParseExporterAttrs(opt map[string]string) (*time.Time, map[string]string, error) { + rest := make(map[string]string, len(opt)) + + var tm *time.Time + + for k, v := range opt { + switch k { + case KeySourceDateEpoch: + var err error + tm, err = parseTime(k, v) + if err != nil { + return nil, nil, err + } + default: + rest[k] = v + } + } + + return tm, rest, nil +} + +func ParseSource(inp *exporter.Source) (*time.Time, bool, error) { + if v, ok := inp.Metadata[exptypes.ExporterEpochKey]; ok { + epoch, err := parseTime("", string(v)) + if err != nil { + return nil, false, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH from frontend: %q", v) + } + return epoch, true, nil + } + return nil, false, nil +} + +func parseTime(key, value string) (*time.Time, error) { + if value == "" { + return nil, nil + } + sde, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "invalid %s: %s", key, err) + } + tm := time.Unix(sde, 0) + return &tm, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/attestations/parse.go b/vendor/github.com/moby/buildkit/frontend/attestations/parse.go new file mode 100644 index 0000000000..00de649fde --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/attestations/parse.go @@ -0,0 +1,81 @@ +package attestations + +import ( + "encoding/csv" + "strings" + + "github.com/pkg/errors" +) + +const ( + KeyTypeSbom = "sbom" + KeyTypeProvenance = "provenance" +) + +const ( + defaultSBOMGenerator = "docker/buildkit-syft-scanner:stable-1" +) + +func Filter(v map[string]string) map[string]string { + attests := make(map[string]string) + for k, v := range v { + if strings.HasPrefix(k, "attest:") { + attests[k] = v + continue + } + if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") { + attests[k] = v + continue + } + } + return attests +} + +func Validate(values map[string]map[string]string) (map[string]map[string]string, error) { + for k := range values { + if k != KeyTypeSbom && k != KeyTypeProvenance { + return nil, errors.Errorf("unknown attestation type %q", k) + } + } + return values, nil +} + +func Parse(values map[string]string) (map[string]map[string]string, error) { + attests := make(map[string]string) + for k, v := range values { + if strings.HasPrefix(k, "attest:") { + attests[strings.ToLower(strings.TrimPrefix(k, "attest:"))] = v + continue + } + if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") { + attests[strings.ToLower(strings.TrimPrefix(k, "build-arg:BUILDKIT_ATTEST_"))] = v + continue + } + } + + out := make(map[string]map[string]string) + for k, v := range attests { + attrs := make(map[string]string) + out[k] = attrs + if k == KeyTypeSbom { + attrs["generator"] = defaultSBOMGenerator + } + if v == "" { + continue + } + csvReader := csv.NewReader(strings.NewReader(v)) + fields, err := 
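+	// each value is parsed as a single CSV record of key=value attribute pairs +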
csvReader.Read()
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to parse %s", k)
+		}
+		for _, field := range fields {
+			parts := strings.SplitN(field, "=", 2)
+			if len(parts) != 2 {
+				parts = append(parts, "")
+			}
+			attrs[parts[0]] = parts[1]
+		}
+	}
+
+	return Validate(out)
+}
diff --git a/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go b/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go
new file mode 100644
index 0000000000..b4446aed45
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/attestations/sbom/sbom.go
@@ -0,0 +1,110 @@
+package sbom
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"path"
+	"strings"
+
+	intoto "github.com/in-toto/in-toto-golang/in_toto"
+	"github.com/moby/buildkit/client/llb"
+	gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
+	"github.com/moby/buildkit/solver/result"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+const (
+	CoreSBOMName    = "sbom"
+	ExtraSBOMPrefix = CoreSBOMName + "-"
+
+	srcDir = "/run/src/"
+	outDir = "/run/out/"
+)
+
+// Scanner is a function type for scanning the contents of a state and
+// returning a new attestation and state representing the scan results.
+//
+// A scanner is designed to scan a single state; however, additional states
+// can also be attached to carry extra information, such as scans of
+// build-contexts or multi-stage builds. Handling these separately allows the
+// scanner to optionally ignore these or to mark them as such in the
+// attestation.
+type Scanner func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[llb.State], error)
+
+func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scanner string) (Scanner, error) {
+	if scanner == "" {
+		return nil, nil
+	}
+
+	_, dt, err := resolver.ResolveImageConfig(ctx, scanner, llb.ResolveImageConfigOpt{})
+	if err != nil {
+		return nil, err
+	}
+
+	var cfg ocispecs.Image
+	if err := json.Unmarshal(dt, &cfg); err != nil {
+		return nil, err
+	}
+
+	var args []string
+	args = append(args, cfg.Config.Entrypoint...)
+	args = append(args, cfg.Config.Cmd...)
+	if len(args) == 0 {
+		return nil, errors.Errorf("scanner %s does not have cmd", scanner)
+	}
+
+	return func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[llb.State], error) {
+		var env []string
+		env = append(env, cfg.Config.Env...)
+		env = append(env, "BUILDKIT_SCAN_DESTINATION="+outDir)
+		env = append(env, "BUILDKIT_SCAN_SOURCE="+path.Join(srcDir, "core", CoreSBOMName))
+		if len(extras) > 0 {
+			env = append(env, "BUILDKIT_SCAN_SOURCE_EXTRAS="+path.Join(srcDir, "extras/"))
+		}
+
+		runOpts := []llb.RunOption{
+			llb.WithCustomName(fmt.Sprintf("[%s] generating sbom using %s", name, scanner)),
+		}
+		for _, opt := range opts {
+			runOpts = append(runOpts, opt)
+		}
+		runOpts = append(runOpts, llb.Dir(cfg.Config.WorkingDir))
+		runOpts = append(runOpts, llb.Args(args))
+		for _, e := range env {
+			k, v, _ := strings.Cut(e, "=")
+			runOpts = append(runOpts, llb.AddEnv(k, v))
+		}
+
+		runscan := llb.Image(scanner).Run(runOpts...)
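+
+		// the contract wired up above: the state to scan is mounted read-only
+		// at BUILDKIT_SCAN_SOURCE, extra states under BUILDKIT_SCAN_SOURCE_EXTRAS,
+		// and the scanner writes its SBOMs into BUILDKIT_SCAN_DESTINATION, which
+		// becomes the attestation state returned below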
+ runscan.AddMount(path.Join(srcDir, "core", CoreSBOMName), ref, llb.Readonly) + for k, extra := range extras { + runscan.AddMount(path.Join(srcDir, "extras", ExtraSBOMPrefix+k), extra, llb.Readonly) + } + + stsbom := runscan.AddMount(outDir, llb.Scratch()) + return result.Attestation[llb.State]{ + Kind: gatewaypb.AttestationKindBundle, + Ref: stsbom, + Metadata: map[string][]byte{ + result.AttestationReasonKey: []byte(result.AttestationReasonSBOM), + result.AttestationSBOMCore: []byte(CoreSBOMName), + }, + InToto: result.InTotoAttestation{ + PredicateType: intoto.PredicateSPDX, + }, + }, nil + }, nil +} + +func HasSBOM[T any](res *result.Result[T]) bool { + for _, as := range res.Attestations { + for _, a := range as { + if a.InToto.PredicateType == intoto.PredicateSPDX { + return true + } + } + } + return false +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go index 3b18364d27..f9dd3643cd 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go @@ -12,6 +12,7 @@ import ( "regexp" "strconv" "strings" + "time" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" @@ -19,15 +20,20 @@ import ( controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/attestations" + "github.com/moby/buildkit/frontend/attestations/sbom" "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" "github.com/moby/buildkit/frontend/dockerfile/dockerignore" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/gateway/client" gwpb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/frontend/subrequests/outline" + "github.com/moby/buildkit/frontend/subrequests/targets" "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - binfotypes "github.com/moby/buildkit/util/buildinfo/types" + "github.com/moby/buildkit/solver/result" + "github.com/moby/buildkit/util/gitutil" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -39,45 +45,59 @@ const ( defaultDockerfileName = "Dockerfile" dockerignoreFilename = ".dockerignore" - buildArgPrefix = "build-arg:" - labelPrefix = "label:" + buildArgPrefix = "build-arg:" + labelPrefix = "label:" + contextPrefix = "context:" + inputMetadataPrefix = "input-metadata:" - keyTarget = "target" - keyFilename = "filename" - keyCacheFrom = "cache-from" // for registry only. 
deprecated in favor of keyCacheImports - keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry - keyCgroupParent = "cgroup-parent" - keyContextSubDir = "contextsubdir" - keyForceNetwork = "force-network-mode" - keyGlobalAddHosts = "add-hosts" - keyHostname = "hostname" - keyImageResolveMode = "image-resolve-mode" - keyMultiPlatform = "multi-platform" - keyNameContext = "contextkey" - keyNameDockerfile = "dockerfilekey" - keyNoCache = "no-cache" - keyOverrideCopyImage = "override-copy-image" // remove after CopyOp implemented - keyShmSize = "shm-size" - keyTargetPlatform = "platform" - keyUlimit = "ulimit" + keyTarget = "target" + keyFilename = "filename" + keyCacheFrom = "cache-from" // for registry only. deprecated in favor of keyCacheImports + keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry + keyCgroupParent = "cgroup-parent" + keyContextSubDir = "contextsubdir" + keyForceNetwork = "force-network-mode" + keyGlobalAddHosts = "add-hosts" + keyHostname = "hostname" + keyImageResolveMode = "image-resolve-mode" + keyMultiPlatform = "multi-platform" + keyNameContext = "contextkey" + keyNameDockerfile = "dockerfilekey" + keyNoCache = "no-cache" + keyShmSize = "shm-size" + keyTargetPlatform = "platform" + keyUlimit = "ulimit" + keyRequestID = "requestid" // Don't forget to update frontend documentation if you add - // a new build-arg: frontend/dockerfile/docs/syntax.md + // a new build-arg: frontend/dockerfile/docs/reference.md keyCacheNSArg = "build-arg:BUILDKIT_CACHE_MOUNT_NS" keyContextKeepGitDirArg = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" keyHostnameArg = "build-arg:BUILDKIT_SANDBOX_HOSTNAME" keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM" keySyntaxArg = "build-arg:BUILDKIT_SYNTAX" + keySourceDateEpoch = "build-arg:SOURCE_DATE_EPOCH" ) var httpPrefix = regexp.MustCompile(`^https?://`) -var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) -func Build(ctx context.Context, c client.Client) (*client.Result, error) { +func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) { opts := c.BuildOpts().Opts caps := c.BuildOpts().LLBCaps gwcaps := c.BuildOpts().Caps + if err := caps.Supports(pb.CapFileBase); err != nil { + return nil, errors.Wrap(err, "needs BuildKit 0.5 or later") + } + if opts["override-copy-image"] != "" { + return nil, errors.New("support for \"override-copy-image\" was removed in BuildKit 0.11") + } + if v, ok := opts["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok { + if b, err := strconv.ParseBool(v); err == nil && b { + return nil, errors.New("support for \"build-arg:BUILDKIT_DISABLE_FILEOP\" was removed in BuildKit 0.11") + } + } + allowForward, capsError := validateCaps(opts["frontend.caps"]) if !allowForward && capsError != nil { return nil, capsError @@ -168,11 +188,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { llb.Differ(llb.DiffNone, false), ) - fileop := useFileOp(opts, &caps) - var buildContext *llb.State isNotLocalContext := false - if st, ok := detectGitContext(opts[localNameContext], opts[keyContextKeepGitDirArg]); ok { + keepGit := false + if v, err := strconv.ParseBool(opts[keyContextKeepGitDirArg]); err == nil { + keepGit = v + } + if st, ok := detectGitContext(opts[localNameContext], keepGit); ok { if !forceLocalDockerfile { src = *st } @@ -205,28 +227,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return nil, errors.Wrapf(err, "failed to read downloaded context") } if isArchive(dt) { - if 
fileop { - bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{ - AttemptUnpack: true, - })) - if !forceLocalDockerfile { - src = bc - } - buildContext = &bc - } else { - copyImage := opts[keyOverrideCopyImage] - if copyImage == "" { - copyImage = dockerfile2llb.DefaultCopyImage - } - unpack := llb.Image(copyImage, dockerfile2llb.WithInternalName("helper image for file operations")). - Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context")) - unpack.AddMount("/src", httpContext, llb.Readonly) - bc := unpack.AddMount("/out", llb.Scratch()) - if !forceLocalDockerfile { - src = bc - } - buildContext = &bc + bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{ + AttemptUnpack: true, + })) + if !forceLocalDockerfile { + src = bc } + buildContext = &bc } else { filename = "context" if !forceLocalDockerfile { @@ -257,7 +264,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { if buildContext != nil { if sub, ok := opts[keyContextSubDir]; ok { - buildContext = scopeToSubDir(buildContext, fileop, sub) + buildContext = scopeToSubDir(buildContext, sub) } } @@ -380,7 +387,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return nil, errors.Wrapf(err, "failed with %s = %s", keySyntaxArg, cmdline) } return res, err - } else if ref, cmdline, loc, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)); ok { + } else if ref, cmdline, loc, ok := parser.DetectSyntax(dtDockerfile); ok { res, err := forwardGateway(ctx, c, ref, cmdline) if err != nil && len(errdefs.Sources(err)) == 0 { return nil, wrapSource(err, sourceMap, loc) @@ -408,7 +415,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return nil, errors.Errorf("invalid boolean value %s", v) } if !b && exportMap { - return nil, errors.Errorf("returning multiple target plaforms is not allowed") + return nil, errors.Errorf("returning multiple target platforms is not allowed") } exportMap = b } @@ -422,55 +429,107 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { opts[keyHostname] = v } - eg, ctx = errgroup.WithContext(ctx) + epoch, err := parseSourceDateEpoch(opts[keySourceDateEpoch]) + if err != nil { + return nil, err + } + + target := opts[keyTarget] + convertOpt := dockerfile2llb.ConvertOpt{ + Target: target, + MetaResolver: c, + BuildArgs: filter(opts, buildArgPrefix), + Labels: filter(opts, labelPrefix), + CacheIDNamespace: opts[keyCacheNSArg], + SessionID: c.BuildOpts().SessionID, + BuildContext: buildContext, + Excludes: excludes, + IgnoreCache: ignoreCache, + TargetPlatform: targetPlatforms[0], + BuildPlatforms: buildPlatforms, + ImageResolveMode: resolveMode, + PrefixPlatform: exportMap, + ExtraHosts: extraHosts, + ShmSize: shmSize, + Ulimit: ulimit, + CgroupParent: opts[keyCgroupParent], + ForceNetMode: defaultNetMode, + LLBCaps: &caps, + SourceMap: sourceMap, + Hostname: opts[keyHostname], + SourceDateEpoch: epoch, + Warn: func(msg, url string, detail [][]byte, location *parser.Range) { + c.Warn(ctx, defVtx, msg, warnOpts(sourceMap, location, detail, url)) + }, + ContextByName: contextByNameFunc(c, c.BuildOpts().SessionID), + } + + defer func() { + var el *parser.ErrorLocation + if errors.As(err, &el) { + err = wrapSource(err, sourceMap, el.Location) + } + }() + + if req, ok := opts[keyRequestID]; ok { + switch req { + case outline.SubrequestsOutlineDefinition.Name: + o, err := 
dockerfile2llb.Dockefile2Outline(ctx, dtDockerfile, convertOpt)
+			if err != nil {
+				return nil, err
+			}
+			return o.ToResult()
+		case targets.SubrequestsTargetsDefinition.Name:
+			targets, err := dockerfile2llb.ListTargets(ctx, dtDockerfile)
+			if err != nil {
+				return nil, err
+			}
+			return targets.ToResult()
+		default:
+			return nil, errdefs.NewUnsupportedSubrequestError(req)
+		}
+	}
+
+	var scanner sbom.Scanner
+	attests, err := attestations.Parse(opts)
+	if err != nil {
+		return nil, err
+	}
+	if attrs, ok := attests[attestations.KeyTypeSbom]; ok {
+		src, ok := attrs["generator"]
+		if !ok {
+			return nil, errors.Errorf("sbom scanner cannot be empty")
+		}
+		ref, err := reference.ParseNormalizedNamed(src)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to parse sbom scanner %s", src)
+		}
+		ref = reference.TagNameOnly(ref)
+
+		scanner, err = sbom.CreateSBOMScanner(ctx, c, ref.String())
+		if err != nil {
+			return nil, err
+		}
+	}
+	scanTargets := make([]*dockerfile2llb.SBOMTargets, len(targetPlatforms))
+
+	eg, ctx2 := errgroup.WithContext(ctx)
 	for i, tp := range targetPlatforms {
 		func(i int, tp *ocispecs.Platform) {
 			eg.Go(func() (err error) {
-				defer func() {
-					var el *parser.ErrorLocation
-					if errors.As(err, &el) {
-						err = wrapSource(err, sourceMap, el.Location)
-					}
-				}()
-
-				st, img, bi, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{
-					Target:            opts[keyTarget],
-					MetaResolver:      c,
-					BuildArgs:         filter(opts, buildArgPrefix),
-					Labels:            filter(opts, labelPrefix),
-					CacheIDNamespace:  opts[keyCacheNSArg],
-					SessionID:         c.BuildOpts().SessionID,
-					BuildContext:      buildContext,
-					Excludes:          excludes,
-					IgnoreCache:       ignoreCache,
-					TargetPlatform:    tp,
-					BuildPlatforms:    buildPlatforms,
-					ImageResolveMode:  resolveMode,
-					PrefixPlatform:    exportMap,
-					ExtraHosts:        extraHosts,
-					ShmSize:           shmSize,
-					Ulimit:            ulimit,
-					CgroupParent:      opts[keyCgroupParent],
-					ForceNetMode:      defaultNetMode,
-					OverrideCopyImage: opts[keyOverrideCopyImage],
-					LLBCaps:           &caps,
-					SourceMap:         sourceMap,
-					Hostname:          opts[keyHostname],
-					Warn: func(msg, url string, detail [][]byte, location *parser.Range) {
-						if i != 0 {
-							return
-						}
-						c.Warn(ctx, defVtx, msg, warnOpts(sourceMap, location, detail, url))
-					},
-					ContextByName: contextByNameFunc(c),
-				})
-
+				opt := convertOpt
+				opt.TargetPlatform = tp
+				if i != 0 {
+					opt.Warn = nil
+				}
+				opt.ContextByName = contextByNameFunc(c, c.BuildOpts().SessionID)
+				st, img, scanTarget, err := dockerfile2llb.Dockerfile2LLB(ctx2, dtDockerfile, opt)
 				if err != nil {
 					return err
 				}
 
-				def, err := st.Marshal(ctx)
+				def, err := st.Marshal(ctx2)
 				if err != nil {
 					return errors.Wrapf(err, "failed to marshal LLB definition")
 				}
@@ -506,7 +565,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
 				}
 			}
 
-				r, err := c.Solve(ctx, client.SolveRequest{
+				r, err := c.Solve(ctx2, client.SolveRequest{
 					Definition:   def.ToPB(),
 					CacheImports: cacheImports,
 				})
@@ -519,30 +578,30 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
 					return err
 				}
 
-				buildinfo, err := json.Marshal(bi)
-				if err != nil {
-					return errors.Wrapf(err, "failed to marshal build info")
+				p := platforms.DefaultSpec()
+				if tp != nil {
+					p = *tp
 				}
+				p = platforms.Normalize(p)
+				k := platforms.Format(p)
 
 				if !exportMap {
 					res.AddMeta(exptypes.ExporterImageConfigKey, config)
-					res.AddMeta(exptypes.ExporterBuildInfo, buildinfo)
 					res.SetRef(ref)
-				} else {
-					p := platforms.DefaultSpec()
-					if tp != nil {
-						p = *tp
-					}
-					k := platforms.Format(p)
+					expPlatforms.Platforms[i] = 
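+					// platform IDs recorded here are matched against refs and attestations by the exporters at export time +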
exptypes.Platform{ + ID: k, + Platform: p, + } + } else { res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) - res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), buildinfo) res.AddRef(k, ref) expPlatforms.Platforms[i] = exptypes.Platform{ ID: k, Platform: p, } } + scanTargets[i] = scanTarget return nil }) }(i, tp) @@ -552,14 +611,45 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return nil, err } - if exportMap { - dt, err := json.Marshal(expPlatforms) - if err != nil { - return nil, err + if scanner != nil { + for i, p := range expPlatforms.Platforms { + target := scanTargets[i] + + var opts []llb.ConstraintsOpt + if target.IgnoreCache { + opts = append(opts, llb.IgnoreCache) + } + att, err := scanner(ctx, p.ID, target.Core, target.Extras, opts...) + if err != nil { + return nil, err + } + + attSolve, err := result.ConvertAttestation(&att, func(st llb.State) (client.Reference, error) { + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + r, err := c.Solve(ctx, frontend.SolveRequest{ + Definition: def.ToPB(), + }) + if err != nil { + return nil, err + } + return r.Ref, nil + }) + if err != nil { + return nil, err + } + res.AddAttestation(p.ID, *attSolve) } - res.AddMeta(exptypes.ExporterPlatformsKey, dt) } + dt, err := json.Marshal(expPlatforms) + if err != nil { + return nil, err + } + res.AddMeta(exptypes.ExporterPlatformsKey, dt) + return res, nil } @@ -606,40 +696,21 @@ func filter(opt map[string]string, key string) map[string]string { return m } -func detectGitContext(ref, gitContext string) (*llb.State, bool) { - found := false - if httpPrefix.MatchString(ref) && gitURLPathWithFragmentSuffix.MatchString(ref) { - found = true - } - - keepGit := false - if gitContext != "" { - if v, err := strconv.ParseBool(gitContext); err == nil { - keepGit = v - } - } - - for _, prefix := range []string{"git://", "github.com/", "git@"} { - if strings.HasPrefix(ref, prefix) { - found = true - break - } - } - if !found { +func detectGitContext(ref string, keepGit bool) (*llb.State, bool) { + g, err := gitutil.ParseGitRef(ref) + if err != nil { return nil, false } - - parts := strings.SplitN(ref, "#", 2) - branch := "" - if len(parts) > 1 { - branch = parts[1] + commit := g.Commit + if g.SubDir != "" { + commit += ":" + g.SubDir } gitOpts := []llb.GitOption{dockerfile2llb.WithInternalName("load git source " + ref)} if keepGit { gitOpts = append(gitOpts, llb.KeepGitDir()) } - st := llb.Git(parts[0], branch, gitOpts...) + st := llb.Git(g.Remote, commit, gitOpts...) return &st, true } @@ -765,27 +836,10 @@ func parseNetMode(v string) (pb.NetMode, error) { } } -func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { - enabled := true - if v, ok := args["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok { - if b, err := strconv.ParseBool(v); err == nil { - enabled = !b - } - } - return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil -} - -func scopeToSubDir(c *llb.State, fileop bool, dir string) *llb.State { - if fileop { - bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{ - CopyDirContentsOnly: true, - })) - return &bc - } - unpack := llb.Image(dockerfile2llb.DefaultCopyImage, dockerfile2llb.WithInternalName("helper image for file operations")). - Run(llb.Shlexf("copy %s/. 
/out/", path.Join("/src", dir)), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("filtering build context")) - unpack.AddMount("/src", *c, llb.Readonly) - bc := unpack.AddMount("/out", llb.Scratch()) +func scopeToSubDir(c *llb.State, dir string) *llb.State { + bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{ + CopyDirContentsOnly: true, + })) return &bc } @@ -812,11 +866,11 @@ func warnOpts(sm *llb.SourceMap, r *parser.Range, detail [][]byte, url string) c return opts } -func contextByNameFunc(c client.Client) func(context.Context, string, string, *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { - return func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { +func contextByNameFunc(c client.Client, sessionID string) func(context.Context, string, string, *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) { + return func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) { named, err := reference.ParseNormalizedNamed(name) if err != nil { - return nil, nil, nil, errors.Wrapf(err, "invalid context name %s", name) + return nil, nil, errors.Wrapf(err, "invalid context name %s", name) } name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") @@ -825,35 +879,44 @@ func contextByNameFunc(c client.Client) func(context.Context, string, string, *o p = &pp } if p != nil { - name := name + "::" + platforms.Format(platforms.Normalize(*p)) - st, img, bi, err := contextByName(ctx, c, name, p, resolveMode) + pname := name + "::" + platforms.Format(platforms.Normalize(*p)) + st, img, err := contextByName(ctx, c, sessionID, name, pname, p, resolveMode) if err != nil { - return nil, nil, nil, err + return nil, nil, err } if st != nil { - return st, img, bi, nil + return st, img, nil } } - return contextByName(ctx, c, name, p, resolveMode) + return contextByName(ctx, c, sessionID, name, name, p, resolveMode) } } -func contextByName(ctx context.Context, c client.Client, name string, platform *ocispecs.Platform, resolveMode string) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { +func contextByName(ctx context.Context, c client.Client, sessionID, name string, pname string, platform *ocispecs.Platform, resolveMode string) (*llb.State, *dockerfile2llb.Image, error) { opts := c.BuildOpts().Opts - v, ok := opts["context:"+name] + v, ok := opts[contextPrefix+pname] if !ok { - return nil, nil, nil, nil + return nil, nil, nil } vv := strings.SplitN(v, ":", 2) if len(vv) != 2 { - return nil, nil, nil, errors.Errorf("invalid context specifier %s for %s", v, name) + return nil, nil, errors.Errorf("invalid context specifier %s for %s", v, pname) + } + // allow git@ without protocol for SSH URLs for backwards compatibility + if strings.HasPrefix(vv[0], "git@") { + vv[0] = "git" } switch vv[0] { case "docker-image": ref := strings.TrimPrefix(vv[1], "//") + if ref == "scratch" { + st := llb.Scratch() + return &st, nil, nil + } + imgOpt := []llb.ImageOption{ - llb.WithCustomName("[context " + name + "] " + ref), + llb.WithCustomName("[context " + pname + "] " + ref), } if platform != nil { imgOpt = append(imgOpt, llb.Platform(*platform)) @@ -861,67 +924,129 @@ func contextByName(ctx context.Context, c client.Client, name string, platform * named, err := reference.ParseNormalizedNamed(ref) if err != nil { - return nil, nil, nil, err + return nil, nil, err } named = 
reference.TagNameOnly(named) _, data, err := c.ResolveImageConfig(ctx, named.String(), llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: resolveMode, - LogName: fmt.Sprintf("[context %s] load metadata for %s", name, ref), + Platform: platform, + ResolveMode: resolveMode, + LogName: fmt.Sprintf("[context %s] load metadata for %s", pname, ref), + ResolverType: llb.ResolverTypeRegistry, }) if err != nil { - return nil, nil, nil, err + return nil, nil, err } var img dockerfile2llb.Image if err := json.Unmarshal(data, &img); err != nil { - return nil, nil, nil, err + return nil, nil, err } img.Created = nil st := llb.Image(ref, imgOpt...) st, err = st.WithImageConfig(data) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - return &st, &img, nil, nil + return &st, &img, nil case "git": - st, ok := detectGitContext(v, "1") + st, ok := detectGitContext(v, true) if !ok { - return nil, nil, nil, errors.Errorf("invalid git context %s", v) + return nil, nil, errors.Errorf("invalid git context %s", v) } - return st, nil, nil, nil + return st, nil, nil case "http", "https": - st, ok := detectGitContext(v, "1") + st, ok := detectGitContext(v, true) if !ok { - httpst := llb.HTTP(v, llb.WithCustomName("[context "+name+"] "+v)) + httpst := llb.HTTP(v, llb.WithCustomName("[context "+pname+"] "+v)) st = &httpst } - return st, nil, nil, nil + return st, nil, nil + case "oci-layout": + refSpec := strings.TrimPrefix(vv[1], "//") + ref, err := reference.Parse(refSpec) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", refSpec) + } + named, ok := ref.(reference.Named) + if !ok { + return nil, nil, errors.Errorf("oci-layout reference %q has no name", ref.String()) + } + dgstd, ok := named.(reference.Digested) + if !ok { + return nil, nil, errors.Errorf("oci-layout reference %q has no digest", named.String()) + } + + // for the dummy ref primarily used in log messages, we can use the + // original name, since the store key may not be significant + dummyRef, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", name) + } + dummyRef, err = reference.WithDigest(dummyRef, dgstd.Digest()) + if err != nil { + return nil, nil, errors.Wrapf(err, "could not wrap %q with digest", name) + } + + _, data, err := c.ResolveImageConfig(ctx, dummyRef.String(), llb.ResolveImageConfigOpt{ + Platform: platform, + ResolveMode: resolveMode, + LogName: fmt.Sprintf("[context %s] load metadata for %s", pname, dummyRef.String()), + ResolverType: llb.ResolverTypeOCILayout, + Store: llb.ResolveImageConfigOptStore{ + SessionID: sessionID, + StoreID: named.Name(), + }, + }) + if err != nil { + return nil, nil, err + } + + var img dockerfile2llb.Image + if err := json.Unmarshal(data, &img); err != nil { + return nil, nil, errors.Wrap(err, "could not parse oci-layout image config") + } + + ociOpt := []llb.OCILayoutOption{ + llb.WithCustomName("[context " + pname + "] OCI load from client"), + llb.OCIStore(c.BuildOpts().SessionID, named.Name()), + } + if platform != nil { + ociOpt = append(ociOpt, llb.Platform(*platform)) + } + st := llb.OCILayout( + dummyRef.String(), + ociOpt..., + ) + st, err = st.WithImageConfig(data) + if err != nil { + return nil, nil, err + } + return &st, &img, nil case "local": st := llb.Local(vv[1], llb.SessionID(c.BuildOpts().SessionID), llb.FollowPaths([]string{dockerignoreFilename}), - llb.SharedKeyHint("context:"+name+"-"+dockerignoreFilename), - 
llb.WithCustomName("[context "+name+"] load "+dockerignoreFilename), + llb.SharedKeyHint("context:"+pname+"-"+dockerignoreFilename), + llb.WithCustomName("[context "+pname+"] load "+dockerignoreFilename), llb.Differ(llb.DiffNone, false), ) def, err := st.Marshal(ctx) if err != nil { - return nil, nil, nil, err + return nil, nil, err } res, err := c.Solve(ctx, client.SolveRequest{ Evaluate: true, Definition: def.ToPB(), }) if err != nil { - return nil, nil, nil, err + return nil, nil, err } ref, err := res.SingleRef() if err != nil { - return nil, nil, nil, err + return nil, nil, err } dt, _ := ref.ReadFile(ctx, client.ReadRequest{ Filename: dockerignoreFilename, @@ -930,58 +1055,46 @@ func contextByName(ctx context.Context, c client.Client, name string, platform * if len(dt) != 0 { excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dt)) if err != nil { - return nil, nil, nil, err + return nil, nil, err } } st = llb.Local(vv[1], - llb.WithCustomName("[context "+name+"] load from client"), + llb.WithCustomName("[context "+pname+"] load from client"), llb.SessionID(c.BuildOpts().SessionID), - llb.SharedKeyHint("context:"+name), + llb.SharedKeyHint("context:"+pname), llb.ExcludePatterns(excludes), ) - return &st, nil, nil, nil + return &st, nil, nil case "input": inputs, err := c.Inputs(ctx) if err != nil { - return nil, nil, nil, err + return nil, nil, err } st, ok := inputs[vv[1]] if !ok { - return nil, nil, nil, errors.Errorf("invalid input %s for %s", vv[1], name) + return nil, nil, errors.Errorf("invalid input %s for %s", vv[1], pname) } - md, ok := opts["input-metadata:"+vv[1]] + md, ok := opts[inputMetadataPrefix+vv[1]] if ok { m := make(map[string][]byte) if err := json.Unmarshal([]byte(md), &m); err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) - } - var bi *binfotypes.BuildInfo - if dtbi, ok := m[exptypes.ExporterBuildInfo]; ok { - var depbi binfotypes.BuildInfo - if err := json.Unmarshal(dtbi, &depbi); err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to parse buildinfo for %s", name) - } - bi = &binfotypes.BuildInfo{ - Deps: map[string]binfotypes.BuildInfo{ - strings.SplitN(vv[1], "::", 2)[0]: depbi, - }, - } + return nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) } var img *dockerfile2llb.Image if dtic, ok := m[exptypes.ExporterImageConfigKey]; ok { st, err = st.WithImageConfig(dtic) if err != nil { - return nil, nil, nil, err + return nil, nil, err } if err := json.Unmarshal(dtic, &img); err != nil { - return nil, nil, nil, errors.Wrapf(err, "failed to parse image config for %s", name) + return nil, nil, errors.Wrapf(err, "failed to parse image config for %s", pname) } } - return &st, img, bi, nil + return &st, img, nil } - return &st, nil, nil, nil + return &st, nil, nil default: - return nil, nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], name) + return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], pname) } } @@ -1011,3 +1124,15 @@ func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error { } return errdefs.WithSource(err, s) } + +func parseSourceDateEpoch(v string) (*time.Time, error) { + if v == "" { + return nil, nil + } + sde, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH: %s", v) + } + tm := time.Unix(sde, 0) + return &tm, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go 
b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go index 6d30b7b8cc..8449530238 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go @@ -1,16 +1,19 @@ package builder import ( + "bytes" "context" "encoding/json" "github.com/moby/buildkit/frontend/gateway/client" "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/frontend/subrequests/outline" + "github.com/moby/buildkit/frontend/subrequests/targets" "github.com/moby/buildkit/solver/errdefs" ) func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Result, bool, error) { - req, ok := opts["requestid"] + req, ok := opts[keyRequestID] if !ok { return nil, false, nil } @@ -18,6 +21,8 @@ func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Resul case subrequests.RequestSubrequestsDescribe: res, err := describe() return res, true, err + case outline.RequestSubrequestsOutline, targets.RequestTargets: // handled later + return nil, false, nil default: return nil, true, errdefs.NewUnsupportedSubrequestError(req) } @@ -25,15 +30,25 @@ func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Resul func describe() (*client.Result, error) { all := []subrequests.Request{ + outline.SubrequestsOutlineDefinition, + targets.SubrequestsTargetsDefinition, subrequests.SubrequestsDescribeDefinition, } - dt, err := json.MarshalIndent(all, " ", "") + dt, err := json.MarshalIndent(all, "", " ") if err != nil { return nil, err } + + b := bytes.NewBuffer(nil) + if err := subrequests.PrintDescribe(dt, b); err != nil { + return nil, err + } + res := client.NewResult() res.Metadata = map[string][]byte{ "result.json": dt, + "result.txt": b.Bytes(), + "version": []byte(subrequests.SubrequestsDescribeDefinition.Version), } return res, nil } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go index a20cd4f95e..6476267e2d 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go @@ -13,22 +13,28 @@ import ( "sort" "strconv" "strings" + "time" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/go-connections/nat" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/llb/imagemetaresolver" + "github.com/moby/buildkit/exporter/containerimage/image" "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/moby/buildkit/frontend/subrequests/outline" + "github.com/moby/buildkit/frontend/subrequests/targets" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" binfotypes "github.com/moby/buildkit/util/buildinfo/types" + "github.com/moby/buildkit/util/gitutil" "github.com/moby/buildkit/util/suggest" "github.com/moby/buildkit/util/system" "github.com/moby/sys/signal" + digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -39,9 +45,15 @@ const ( defaultContextLocalName = "context" historyComment = "buildkit.dockerfile.v0" - DefaultCopyImage = 
"docker/dockerfile-copy:v0.1.9@sha256:e8f159d3f00786604b93c675ee2783f8dc194bb565e61ca5788f6a6e9d304061" + sbomScanContext = "BUILDKIT_SBOM_SCAN_CONTEXT" + sbomScanStage = "BUILDKIT_SBOM_SCAN_STAGE" ) +var nonEnvArgs = map[string]struct{}{ + sbomScanContext: {}, + sbomScanStage: {}, +} + type ConvertOpt struct { Target string MetaResolver llb.ImageMetaResolver @@ -54,60 +66,127 @@ type ConvertOpt struct { // Empty slice means ignore cache for all stages. Nil doesn't disable cache. IgnoreCache []string // CacheIDNamespace scopes the IDs for different cache mounts - CacheIDNamespace string - ImageResolveMode llb.ResolveMode - TargetPlatform *ocispecs.Platform - BuildPlatforms []ocispecs.Platform - PrefixPlatform bool - ExtraHosts []llb.HostIP - ShmSize int64 - Ulimit []pb.Ulimit - CgroupParent string - ForceNetMode pb.NetMode - OverrideCopyImage string - LLBCaps *apicaps.CapSet - ContextLocalName string - SourceMap *llb.SourceMap - Hostname string - Warn func(short, url string, detail [][]byte, location *parser.Range) - ContextByName func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, *binfotypes.BuildInfo, error) + CacheIDNamespace string + ImageResolveMode llb.ResolveMode + TargetPlatform *ocispecs.Platform + BuildPlatforms []ocispecs.Platform + PrefixPlatform bool + ExtraHosts []llb.HostIP + ShmSize int64 + Ulimit []pb.Ulimit + CgroupParent string + ForceNetMode pb.NetMode + LLBCaps *apicaps.CapSet + ContextLocalName string + SourceMap *llb.SourceMap + Hostname string + SourceDateEpoch *time.Time + Warn func(short, url string, detail [][]byte, location *parser.Range) + ContextByName func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) } -func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, *binfotypes.BuildInfo, error) { - buildInfo := &binfotypes.BuildInfo{} +type SBOMTargets struct { + Core llb.State + Extras map[string]llb.State + + IgnoreCache bool +} + +func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, *SBOMTargets, error) { + ds, err := toDispatchState(ctx, dt, opt) + if err != nil { + return nil, nil, nil, err + } + + sbom := SBOMTargets{ + Core: ds.state, + Extras: map[string]llb.State{}, + } + if ds.scanContext { + sbom.Extras["context"] = ds.opt.buildContext + } + if ds.ignoreCache { + sbom.IgnoreCache = true + } + for _, dsi := range findReachable(ds) { + if ds != dsi && dsi.scanStage { + sbom.Extras[dsi.stageName] = dsi.state + if dsi.ignoreCache { + sbom.IgnoreCache = true + } + } + } + + return &ds.state, &ds.image, &sbom, nil +} + +func Dockefile2Outline(ctx context.Context, dt []byte, opt ConvertOpt) (*outline.Outline, error) { + ds, err := toDispatchState(ctx, dt, opt) + if err != nil { + return nil, err + } + o := ds.Outline(dt) + return &o, nil +} + +func ListTargets(ctx context.Context, dt []byte) (*targets.List, error) { + dockerfile, err := parser.Parse(bytes.NewReader(dt)) + if err != nil { + return nil, err + } + stages, _, err := instructions.Parse(dockerfile.AST) + if err != nil { + return nil, err + } + + l := &targets.List{ + Sources: [][]byte{dt}, + } + + for i, s := range stages { + t := targets.Target{ + Name: s.Name, + Description: s.Comment, + Default: i == len(stages)-1, + Base: s.BaseName, + Platform: s.Platform, + Location: toSourceLocation(s.Location), + } + l.Targets = append(l.Targets, t) + } + return l, nil +} + +func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) 
(*dispatchState, error) { contextByName := opt.ContextByName - opt.ContextByName = func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, *binfotypes.BuildInfo, error) { + opt.ContextByName = func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) { if !strings.EqualFold(name, "scratch") && !strings.EqualFold(name, "context") { if contextByName != nil { if p == nil { p = opt.TargetPlatform } - st, img, bi, err := contextByName(ctx, name, resolveMode, p) + st, img, err := contextByName(ctx, name, resolveMode, p) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - if bi != nil && bi.Deps != nil { - for k := range bi.Deps { - if buildInfo.Deps == nil { - buildInfo.Deps = make(map[string]binfotypes.BuildInfo) - } - buildInfo.Deps[k] = bi.Deps[k] - } - } - return st, img, bi, nil + return st, img, nil } } - return nil, nil, nil, nil + return nil, nil, nil } if len(dt) == 0 { - return nil, nil, nil, errors.Errorf("the Dockerfile cannot be empty") + return nil, errors.Errorf("the Dockerfile cannot be empty") } if opt.ContextLocalName == "" { opt.ContextLocalName = defaultContextLocalName } + if opt.Warn == nil { + opt.Warn = func(string, string, [][]byte, *parser.Range) {} + } + platformOpt := buildPlatformOpt(&opt) optMetaArgs := getPlatformArgs(platformOpt) @@ -117,7 +196,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, dockerfile, err := parser.Parse(bytes.NewReader(dt)) if err != nil { - return nil, nil, nil, err + return nil, err } for _, w := range dockerfile.Warnings { @@ -128,17 +207,27 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, stages, metaArgs, err := instructions.Parse(dockerfile.AST) if err != nil { - return nil, nil, nil, err + return nil, err } shlex := shell.NewLex(dockerfile.EscapeToken) + outline := newOutlineCapture() for _, cmd := range metaArgs { for _, metaArg := range cmd.Args { - if metaArg.Value != nil { - *metaArg.Value, _ = shlex.ProcessWordWithMap(*metaArg.Value, metaArgsToMap(optMetaArgs)) + info := argInfo{definition: metaArg, location: cmd.Location()} + if v, ok := opt.BuildArgs[metaArg.Key]; !ok { + if metaArg.Value != nil { + *metaArg.Value, info.deps, _ = shlex.ProcessWordWithMatches(*metaArg.Value, metaArgsToMap(optMetaArgs)) + } + } else { + metaArg.Value = &v } - optMetaArgs = append(optMetaArgs, setKVValue(metaArg, opt.BuildArgs)) + optMetaArgs = append(optMetaArgs, metaArg) + if metaArg.Value != nil { + info.value = *metaArg.Value + } + outline.allArgs[metaArg.Key] = info } } @@ -151,12 +240,12 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, // set base state for every image for i, st := range stages { - name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs)) + name, used, err := shlex.ProcessWordWithMatches(st.BaseName, metaArgsToMap(optMetaArgs)) if err != nil { - return nil, nil, nil, parser.WithLocation(err, st.Location) + return nil, parser.WithLocation(err, st.Location) } if name == "" { - return nil, nil, nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location) + return nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location) } st.BaseName = name @@ -166,31 +255,36 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, ctxPaths: make(map[string]struct{}), stageName: st.Name, prefixPlatform: 
opt.PrefixPlatform, + outline: outline.clone(), + epoch: opt.SourceDateEpoch, } if v := st.Platform; v != "" { - v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs)) + v, u, err := shlex.ProcessWordWithMatches(v, metaArgsToMap(optMetaArgs)) if err != nil { - return nil, nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location) + return nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location) } p, err := platforms.Parse(v) if err != nil { - return nil, nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location) + return nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location) + } + for k := range u { + used[k] = struct{}{} } ds.platform = &p } if st.Name != "" { - s, img, bi, err := opt.ContextByName(ctx, st.Name, opt.ImageResolveMode.String(), ds.platform) + s, img, err := opt.ContextByName(ctx, st.Name, opt.ImageResolveMode.String(), ds.platform) if err != nil { - return nil, nil, nil, err + return nil, err } if s != nil { ds.noinit = true ds.state = *s if img != nil { - ds.image = *img + ds.image = clampTimes(*img, opt.SourceDateEpoch) if img.Architecture != "" && img.OS != "" { ds.platform = &ocispecs.Platform{ OS: img.OS, @@ -199,9 +293,6 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } } } - if bi != nil { - ds.buildInfo = *bi - } allDispatchStates.addState(ds) continue } @@ -213,6 +304,10 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, allDispatchStates.addState(ds) + for k := range used { + ds.outline.usedArgs[k] = struct{}{} + } + total := 0 if ds.stage.BaseName != emptyImageName && ds.base == nil { total = 1 @@ -222,9 +317,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand: total++ case *instructions.WorkdirCommand: - if useFileOp(opt.BuildArgs, opt.LLBCaps) { - total++ - } + total++ } } ds.cmdTotal = total @@ -249,7 +342,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, var ok bool target, ok = allDispatchStates.findStateByName(opt.Target) if !ok { - return nil, nil, nil, errors.Errorf("target stage %s could not be found", opt.Target) + return nil, errors.Errorf("target stage %s could not be found", opt.Target) } } @@ -259,7 +352,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, for i, cmd := range d.stage.Commands { newCmd, err := toCommand(cmd, allDispatchStates) if err != nil { - return nil, nil, nil, err + return nil, err } d.commands[i] = newCmd for _, src := range newCmd.sources { @@ -274,7 +367,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } if has, state := hasCircularDependency(allDispatchStates.states); has { - return nil, nil, nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName) + return nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName) } if len(allDispatchStates.states) == 1 { @@ -317,7 +410,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, d.stage.BaseName = reference.TagNameOnly(ref).String() var isScratch bool - st, img, bi, err := opt.ContextByName(ctx, d.stage.BaseName, opt.ImageResolveMode.String(), platform) + st, img, err := opt.ContextByName(ctx, d.stage.BaseName, 
opt.ImageResolveMode.String(), platform) if err != nil { return err } @@ -327,9 +420,6 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } else { d.image = emptyImage(platformOpt.targetPlatform) } - if bi != nil { - d.buildInfo = *bi - } d.state = st.Platform(*platform) d.platform = platform return nil @@ -341,9 +431,10 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } prefix += "internal]" dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: opt.ImageResolveMode.String(), - LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), + Platform: platform, + ResolveMode: opt.ImageResolveMode.String(), + LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), + ResolverType: llb.ResolverTypeRegistry, }) if err != nil { return suggest.WrapError(errors.Wrap(err, origName), origName, append(allStageNames, commonImageNames()...), true) @@ -406,7 +497,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } if err := eg.Wait(); err != nil { - return nil, nil, nil, err + return nil, err } buildContext := &mutableOutput{} @@ -417,19 +508,6 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, continue } - // collect build sources and dependencies - if len(d.buildInfo.Sources) > 0 { - buildInfo.Sources = append(buildInfo.Sources, d.buildInfo.Sources...) - } - if d.buildInfo.Deps != nil { - for name, bi := range d.buildInfo.Deps { - if buildInfo.Deps == nil { - buildInfo.Deps = make(map[string]binfotypes.BuildInfo) - } - buildInfo.Deps[name] = bi - } - } - if d.base != nil { d.state = d.base.state d.platform = d.base.platform @@ -438,11 +516,11 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, // make sure that PATH is always set if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok { - var os string + var pathOS string if d.platform != nil { - os = d.platform.OS + pathOS = d.platform.OS } - d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(os)) + d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(pathOS)) } // initialize base metadata from image conf @@ -455,12 +533,12 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } if d.image.Config.WorkingDir != "" { if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil { - return nil, nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, parser.WithLocation(err, d.stage.Location) } } if d.image.Config.User != "" { if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil { - return nil, nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, parser.WithLocation(err, d.stage.Location) } } d.state = d.state.Network(opt.ForceNetMode) @@ -480,35 +558,37 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, shmSize: opt.ShmSize, ulimit: opt.Ulimit, cgroupParent: opt.CgroupParent, - copyImage: opt.OverrideCopyImage, llbCaps: opt.LLBCaps, sourceMap: opt.SourceMap, } - if opt.copyImage == "" { - opt.copyImage = DefaultCopyImage - } if err = dispatchOnBuildTriggers(d, d.image.Config.OnBuild, opt); err != nil { - return nil, nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, parser.WithLocation(err, d.stage.Location) } 
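For reference, the metadata lookup in this hunk now pins ResolverType explicitly. A minimal sketch of the same call shape, assuming an llb.ImageMetaResolver plus context, encoding/json and ocispecs imports; the image ref is illustrative:

// Sketch: resolving base-image metadata via the frontend meta resolver,
// pinned to the registry resolver type as in the hunk above.
func resolveBase(ctx context.Context, r llb.ImageMetaResolver) (ocispecs.Image, error) {
	var img ocispecs.Image
	_, raw, err := r.ResolveImageConfig(ctx, "docker.io/library/alpine:3.17", llb.ResolveImageConfigOpt{
		Platform:     &ocispecs.Platform{OS: "linux", Architecture: "amd64"},
		ResolveMode:  llb.ResolveModeDefault.String(),
		LogName:      "[internal] load metadata for alpine",
		ResolverType: llb.ResolverTypeRegistry,
	})
	if err != nil {
		return img, err
	}
	// the raw bytes are the image config JSON; the digest pins the manifest
	return img, json.Unmarshal(raw, &img)
}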
d.image.Config.OnBuild = nil for _, cmd := range d.commands { if err := dispatch(d, cmd, opt); err != nil { - return nil, nil, nil, parser.WithLocation(err, cmd.Location()) + return nil, parser.WithLocation(err, cmd.Location()) } } + d.opt = opt for p := range d.ctxPaths { ctxPaths[p] = struct{}{} } - } - // sort build sources - if len(buildInfo.Sources) > 0 { - sort.Slice(buildInfo.Sources, func(i, j int) bool { - return buildInfo.Sources[i].Ref < buildInfo.Sources[j].Ref - }) + locals := []instructions.KeyValuePairOptional{} + locals = append(locals, d.opt.metaArgs...) + locals = append(locals, d.buildArgs...) + for _, a := range locals { + switch a.Key { + case sbomScanStage: + d.scanStage = isEnabledForStage(d.stageName, a.ValueString()) + case sbomScanContext: + d.scanContext = isEnabledForStage(d.stageName, a.ValueString()) + } + } } if len(opt.Labels) != 0 && target.image.Config.Labels == nil { @@ -540,7 +620,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, if opt.LLBCaps != nil { defaults = append(defaults, llb.WithCaps(*opt.LLBCaps)) } - st := target.state.SetMarshalDefaults(defaults...) + target.state = target.state.SetMarshalDefaults(defaults...) if !platformOpt.implicitTarget { target.image.OS = platformOpt.targetPlatform.OS @@ -548,7 +628,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, target.image.Variant = platformOpt.targetPlatform.Variant } - return &st, &target.image, buildInfo, nil + return target, nil } func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string { @@ -608,7 +688,6 @@ type dispatchOpt struct { shmSize int64 ulimit []pb.Ulimit cgroupParent string - copyImage string llbCaps *apicaps.CapSet sourceMap *llb.SourceMap } @@ -653,17 +732,25 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { case *instructions.WorkdirCommand: err = dispatchWorkdir(d, c, true, &opt) case *instructions.AddCommand: - err = dispatchCopy(d, copyConfig{ - params: c.SourcesAndDest, - source: opt.buildContext, - isAddCommand: true, - cmdToPrint: c, - chown: c.Chown, - chmod: c.Chmod, - link: c.Link, - location: c.Location(), - opt: opt, - }) + var checksum digest.Digest + if c.Checksum != "" { + checksum, err = digest.Parse(c.Checksum) + } + if err == nil { + err = dispatchCopy(d, copyConfig{ + params: c.SourcesAndDest, + source: opt.buildContext, + isAddCommand: true, + cmdToPrint: c, + chown: c.Chown, + chmod: c.Chmod, + link: c.Link, + keepGitDir: c.KeepGitDir, + checksum: checksum, + location: c.Location(), + opt: opt, + }) + } if err == nil { for _, src := range c.SourcePaths { if !strings.HasPrefix(src, "http://") && !strings.HasPrefix(src, "https://") { @@ -720,6 +807,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { } type dispatchState struct { + opt dispatchOpt state llb.State image Image platform *ocispecs.Platform @@ -738,6 +826,10 @@ type dispatchState struct { cmdTotal int prefixPlatform bool buildInfo binfotypes.BuildInfo + outline outlineCapture + epoch *time.Time + scanStage bool + scanContext bool } type dispatchStates struct { @@ -754,6 +846,7 @@ func (dss *dispatchStates) addState(ds *dispatchState) { if d, ok := dss.statesByName[ds.stage.BaseName]; ok { ds.base = d + ds.outline = d.outline.clone() } if ds.stage.Name != "" { dss.statesByName[strings.ToLower(ds.stage.Name)] = ds @@ -813,7 +906,7 @@ func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error { d.state = d.state.AddEnv(e.Key, e.Value) d.image.Config.Env 
= addEnv(d.image.Config.Env, e.Key, e.Value) } - return commitToHistory(&d.image, commitMessage.String(), false, nil) + return commitToHistory(&d.image, commitMessage.String(), false, nil, d.epoch) } func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error { @@ -824,7 +917,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE var args []string = c.CmdLine if len(c.Files) > 0 { if len(args) != 1 || !c.PrependShell { - return fmt.Errorf("parsing produced an invalid run command: %v", args) + return errors.Errorf("parsing produced an invalid run command: %v", args) } if heredoc := parser.MustParseHeredoc(args[0]); heredoc != nil { @@ -943,7 +1036,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE } d.state = d.state.Run(opt...).Root() - return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state) + return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state, d.epoch) } func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool, opt *dispatchOpt) error { @@ -955,7 +1048,7 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo d.image.Config.WorkingDir = wd if commit { withLayer := false - if wd != "/" && opt != nil && useFileOp(opt.buildArgValues, opt.llbCaps) { + if wd != "/" { mkdirOpt := []llb.MkdirOption{llb.WithParents(true)} if user := d.image.Config.User; user != "" { mkdirOpt = append(mkdirOpt, llb.WithUser(user)) @@ -974,12 +1067,12 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo ) withLayer = true } - return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil) + return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil, d.epoch) } return nil } -func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { +func dispatchCopy(d *dispatchState, cfg copyConfig) error { pp, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath) if err != nil { return err @@ -1004,6 +1097,21 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { } } + if cfg.checksum != "" { + if !cfg.isAddCommand { + return errors.New("checksum can't be specified for COPY") + } + if !addChecksumEnabled { + return errors.New("instruction 'ADD --checksum=' requires the labs channel") + } + if len(cfg.params.SourcePaths) != 1 { + return errors.New("checksum can't be specified for multiple sources") + } + if !isHTTPSource(cfg.params.SourcePaths[0]) { + return errors.New("checksum can't be specified for non-HTTP sources") + } + } + commitMessage := bytes.NewBufferString("") if cfg.isAddCommand { commitMessage.WriteString("ADD") @@ -1015,7 +1123,34 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { for _, src := range cfg.params.SourcePaths { commitMessage.WriteString(" " + src) - if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { + gitRef, gitRefErr := gitutil.ParseGitRef(src) + if gitRefErr == nil && !gitRef.IndistinguishableFromLocal { + if !cfg.isAddCommand { + return errors.New("source can't be a git ref for COPY") + } + if !addGitEnabled { + return errors.New("instruction ADD requires the labs channel") + } + // TODO: print a warning (not an error) if gitRef.UnencryptedTCP is true + commit := gitRef.Commit + if gitRef.SubDir != "" { + commit += ":" + gitRef.SubDir + } + var gitOptions []llb.GitOption + if 
cfg.keepGitDir { + gitOptions = append(gitOptions, llb.KeepGitDir()) + } + st := llb.Git(gitRef.Remote, commit, gitOptions...) + opts := append([]llb.CopyOption{&llb.CopyInfo{ + Mode: mode, + CreateDestPath: true, + }}, copyOpt...) + if a == nil { + a = llb.Copy(st, "/", dest, opts...) + } else { + a = a.Copy(st, "/", dest, opts...) + } + } else if isHTTPSource(src) { if !cfg.isAddCommand { return errors.New("source can't be a URL for COPY") } @@ -1033,7 +1168,7 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { } } - st := llb.HTTP(src, llb.Filename(f), dfCmd(cfg.params)) + st := llb.HTTP(src, llb.Filename(f), llb.Checksum(cfg.checksum), dfCmd(cfg.params)) opts := append([]llb.CopyOption{&llb.CopyInfo{ Mode: mode, @@ -1107,7 +1242,8 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { fileOpt = append(fileOpt, llb.IgnoreCache) } - if cfg.opt.llbCaps.Supports(pb.CapMergeOp) == nil && cfg.link && cfg.chmod == "" { + // cfg.opt.llbCaps can be nil in unit tests + if cfg.opt.llbCaps != nil && cfg.opt.llbCaps.Supports(pb.CapMergeOp) == nil && cfg.link && cfg.chmod == "" { pgID := identity.NewID() d.cmdIndex-- // prefixCommand increases it pgName := prefixCommand(d, name, d.prefixPlatform, &platform, env) @@ -1126,7 +1262,7 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { d.state = d.state.File(a, fileOpt...) } - return commitToHistory(&d.image, commitMessage.String(), true, &d.state) + return commitToHistory(&d.image, commitMessage.String(), true, &d.state, d.epoch) } type copyConfig struct { @@ -1137,136 +1273,15 @@ type copyConfig struct { chown string chmod string link bool + keepGitDir bool + checksum digest.Digest location []parser.Range opt dispatchOpt } -func dispatchCopy(d *dispatchState, cfg copyConfig) error { - if useFileOp(cfg.opt.buildArgValues, cfg.opt.llbCaps) { - return dispatchCopyFileOp(d, cfg) - } - - if len(cfg.params.SourceContents) > 0 { - return errors.New("inline content copy is not supported") - } - - if cfg.chmod != "" { - if cfg.opt.llbCaps != nil && cfg.opt.llbCaps.Supports(pb.CapFileBase) != nil { - return errors.Wrap(cfg.opt.llbCaps.Supports(pb.CapFileBase), "chmod is not supported") - } - return errors.New("chmod is not supported") - } - - img := llb.Image(cfg.opt.copyImage, llb.MarkImageInternal, llb.Platform(cfg.opt.buildPlatforms[0]), WithInternalName("helper image for file operations")) - pp, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath) - if err != nil { - return err - } - dest := path.Join(".", pp) - if cfg.params.DestPath == "." 
|| cfg.params.DestPath == "" || cfg.params.DestPath[len(cfg.params.DestPath)-1] == filepath.Separator { - dest += string(filepath.Separator) - } - args := []string{"copy"} - unpack := cfg.isAddCommand - - mounts := make([]llb.RunOption, 0, len(cfg.params.SourcePaths)) - if cfg.chown != "" { - args = append(args, fmt.Sprintf("--chown=%s", cfg.chown)) - _, _, err := parseUser(cfg.chown) - if err != nil { - mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly)) - mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly)) - } - } - - commitMessage := bytes.NewBufferString("") - if cfg.isAddCommand { - commitMessage.WriteString("ADD") - } else { - commitMessage.WriteString("COPY") - } - - for i, src := range cfg.params.SourcePaths { - commitMessage.WriteString(" " + src) - if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { - if !cfg.isAddCommand { - return errors.New("source can't be a URL for COPY") - } - - // Resources from remote URLs are not decompressed. - // https://docs.docker.com/engine/reference/builder/#add - // - // Note: mixing up remote archives and local archives in a single ADD instruction - // would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717 - unpack = false - u, err := url.Parse(src) - f := "__unnamed__" - if err == nil { - if base := path.Base(u.Path); base != "." && base != "/" { - f = base - } - } - target := path.Join(fmt.Sprintf("/src-%d", i), f) - args = append(args, target) - mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(cfg.params)), llb.Readonly)) - } else { - d, f := splitWildcards(src) - targetCmd := fmt.Sprintf("/src-%d", i) - targetMount := targetCmd - if f == "" { - f = path.Base(src) - targetMount = path.Join(targetMount, f) - } - targetCmd = path.Join(targetCmd, f) - args = append(args, targetCmd) - mounts = append(mounts, llb.AddMount(targetMount, cfg.source, llb.SourcePath(d), llb.Readonly)) - } - } - - commitMessage.WriteString(" " + cfg.params.DestPath) - - args = append(args, dest) - if unpack { - args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...) - } - - platform := cfg.opt.targetPlatform - if d.platform != nil { - platform = *d.platform - } - - env, err := d.state.Env(context.TODO()) - if err != nil { - return err - } - - runOpt := []llb.RunOption{ - llb.Args(args), - llb.Dir("/dest"), - llb.ReadonlyRootFS(), - dfCmd(cfg.cmdToPrint), - llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(cfg.opt.shlex, cfg.cmdToPrint.String(), env)), d.prefixPlatform, &platform, env)), - location(cfg.opt.sourceMap, cfg.location), - } - if d.ignoreCache { - runOpt = append(runOpt, llb.IgnoreCache) - } - - if cfg.opt.llbCaps != nil { - if err := cfg.opt.llbCaps.Supports(pb.CapExecMetaNetwork); err == nil { - runOpt = append(runOpt, llb.Network(llb.NetModeNone)) - } - } - - run := img.Run(append(runOpt, mounts...)...) 
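The legacy path being removed here ran a helper copy image; with fileops assumed, the same COPY becomes a single file op. A minimal sketch with illustrative context and image names:

// Sketch: the fileop-based copy that replaces the helper-image path deleted here.
src := llb.Local("context")                      // build context, name illustrative
st := llb.Image("docker.io/library/alpine:3.17") // stage state, ref illustrative
st = st.File(
	llb.Copy(src, "app/", "/srv/app/", &llb.CopyInfo{CreateDestPath: true}),
	llb.WithCustomName("COPY app/ /srv/app/"),
)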
- d.state = run.AddMount("/dest", d.state).Platform(platform) - - return commitToHistory(&d.image, commitMessage.String(), true, &d.state) -} - func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error { d.image.Author = c.Maintainer - return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil, d.epoch) } func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error { @@ -1278,7 +1293,7 @@ func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error { d.image.Config.Labels[v.Key] = v.Value commitMessage.WriteString(" " + v.String()) } - return commitToHistory(&d.image, commitMessage.String(), false, nil) + return commitToHistory(&d.image, commitMessage.String(), false, nil, d.epoch) } func dispatchOnbuild(d *dispatchState, c *instructions.OnbuildCommand) error { @@ -1294,7 +1309,7 @@ func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error { d.image.Config.Cmd = args d.image.Config.ArgsEscaped = true d.cmdSet = true - return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil, d.epoch) } func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) error { @@ -1306,18 +1321,18 @@ func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) err if !d.cmdSet { d.image.Config.Cmd = nil } - return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil, d.epoch) } func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error { - d.image.Config.Healthcheck = &HealthConfig{ + d.image.Config.Healthcheck = &image.HealthConfig{ Test: c.Health.Test, Interval: c.Health.Interval, Timeout: c.Health.Timeout, StartPeriod: c.Health.StartPeriod, Retries: c.Health.Retries, } - return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil, d.epoch) } func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error { @@ -1347,14 +1362,14 @@ func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shel d.image.Config.ExposedPorts[string(p)] = struct{}{} } - return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil, d.epoch) } func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error { d.state = d.state.User(c.User) d.image.Config.User = c.User if commit { - return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil, d.epoch) } return nil } @@ -1369,7 +1384,7 @@ func dispatchVolume(d *dispatchState, c *instructions.VolumeCommand) error { } d.image.Config.Volumes[v] = struct{}{} } - return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil, d.epoch) } func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) error { @@ -1377,12 +1392,12 @@ func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) err return err } d.image.Config.StopSignal = c.Signal - 
return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil, d.epoch) } func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error { d.image.Config.Shell = c.Shell - return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil) + return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil, d.epoch) } func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error { @@ -1395,21 +1410,34 @@ func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instru commitStr += "=" + *arg.Value } commitStrs = append(commitStrs, commitStr) + + skipArgInfo := false // skip the arg info if the arg is inherited from global scope if buildArg.Value == nil { for _, ma := range metaArgs { if ma.Key == buildArg.Key { buildArg.Value = ma.Value + skipArgInfo = true } } } + ai := argInfo{definition: arg, location: c.Location()} + if buildArg.Value != nil { - d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value) + if _, ok := nonEnvArgs[buildArg.Key]; !ok { + d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value) + } + ai.value = *buildArg.Value } + if !skipArgInfo { + d.outline.allArgs[arg.Key] = ai + } + d.outline.usedArgs[arg.Key] = struct{}{} + d.buildArgs = append(d.buildArgs, buildArg) } - return commitToHistory(&d.image, "ARG "+strings.Join(commitStrs, " "), false, nil) + return commitToHistory(&d.image, "ARG "+strings.Join(commitStrs, " "), false, nil, d.epoch) } func pathRelativeToWorkingDir(s llb.State, p string) (string, error) { @@ -1423,27 +1451,6 @@ func pathRelativeToWorkingDir(s llb.State, p string) (string, error) { return path.Join(dir, p), nil } -func splitWildcards(name string) (string, string) { - i := 0 - for ; i < len(name); i++ { - ch := name[i] - if ch == '\\' { - i++ - } else if ch == '*' || ch == '?' || ch == '[' { - break - } - } - if i == len(name) { - return name, "" - } - - base := path.Base(name[:i]) - if name[:i] == "" || strings.HasSuffix(name[:i], string(filepath.Separator)) { - base = "" - } - return path.Dir(name[:i]), base + name[i:] -} - func addEnv(env []string, k, v string) []string { gotOne := false for i, envVar := range env { @@ -1507,7 +1514,7 @@ func runCommandString(args []string, buildArgs []instructions.KeyValuePairOption return strings.Join(append(tmpBuildEnv, args...), " ") } -func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) error { +func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State, tm *time.Time) error { if st != nil { msg += " # buildkit" } @@ -1516,6 +1523,7 @@ func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) erro CreatedBy: msg, Comment: historyComment, EmptyLayer: !withLayer, + Created: tm, }) return nil } @@ -1535,6 +1543,20 @@ func isReachable(from, to *dispatchState) (ret bool) { return false } +func findReachable(from *dispatchState) (ret []*dispatchState) { + if from == nil { + return nil + } + ret = append(ret, from) + if from.base != nil { + ret = append(ret, findReachable(from.base)...) + } + for d := range from.deps { + ret = append(ret, findReachable(d)...) 
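+ // from.deps holds stages this one references (for example via COPY --from); together with base, the walk reaches every stage the target depends on, which Dockerfile2LLB uses to collect SBOM scan targets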
+ } + return ret +} + func hasCircularDependency(states []*dispatchState) (bool, *dispatchState) { var visit func(state *dispatchState) bool if states == nil { @@ -1570,42 +1592,6 @@ func hasCircularDependency(states []*dispatchState) (bool, *dispatchState) { return false, nil } -func parseUser(str string) (uid uint32, gid uint32, err error) { - if str == "" { - return 0, 0, nil - } - parts := strings.SplitN(str, ":", 2) - for i, v := range parts { - switch i { - case 0: - uid, err = parseUID(v) - if err != nil { - return 0, 0, err - } - if len(parts) == 1 { - gid = uid - } - case 1: - gid, err = parseUID(v) - if err != nil { - return 0, 0, err - } - } - } - return -} - -func parseUID(str string) (uint32, error) { - if str == "root" { - return 0, nil - } - uid, err := strconv.ParseUint(str, 10, 32) - if err != nil { - return 0, err - } - return uint32(uid), nil -} - func normalizeContextPaths(paths map[string]struct{}) []string { pathSlice := make([]string, 0, len(paths)) for p := range paths { @@ -1770,16 +1756,6 @@ func platformFromEnv(env []string) *ocispecs.Platform { return &p } -func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { - enabled := true - if v, ok := args["BUILDKIT_DISABLE_FILEOP"]; ok { - if b, err := strconv.ParseBool(v); err == nil { - enabled = !b - } - } - return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil -} - func location(sm *llb.SourceMap, locations []parser.Range) llb.ConstraintsOpt { loc := make([]*pb.Range, 0, len(locations)) for _, l := range locations { @@ -1817,3 +1793,36 @@ func commonImageNames() []string { } return out } + +func clampTimes(img Image, tm *time.Time) Image { + if tm == nil { + return img + } + for i, h := range img.History { + if h.Created == nil || h.Created.After(*tm) { + img.History[i].Created = tm + } + } + if img.Created != nil && img.Created.After(*tm) { + img.Created = tm + } + return img +} + +func isHTTPSource(src string) bool { + return strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") +} + +func isEnabledForStage(stage string, value string) bool { + if enabled, err := strconv.ParseBool(value); err == nil { + return enabled + } + + vv := strings.Split(value, ",") + for _, v := range vv { + if v == stage { + return true + } + } + return false +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go new file mode 100644 index 0000000000..4506baeb8b --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go @@ -0,0 +1,6 @@ +//go:build dfaddchecksum +// +build dfaddchecksum + +package dockerfile2llb + +const addChecksumEnabled = true diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addgit.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addgit.go new file mode 100644 index 0000000000..9ccb7a20e8 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_addgit.go @@ -0,0 +1,6 @@ +//go:build dfaddgit +// +build dfaddgit + +package dockerfile2llb + +const addGitEnabled = true diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go new file mode 100644 index 0000000000..8de035297c --- /dev/null +++ 
b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go @@ -0,0 +1,6 @@ +//go:build !dfaddchecksum +// +build !dfaddchecksum + +package dockerfile2llb + +const addChecksumEnabled = false diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go new file mode 100644 index 0000000000..119bb32c88 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go @@ -0,0 +1,6 @@ +//go:build !dfaddgit +// +build !dfaddgit + +package dockerfile2llb + +const addGitEnabled = false diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go index 7777fba91a..1015590a0d 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go @@ -2,12 +2,9 @@ package dockerfile2llb import ( "context" - "fmt" "os" "path" "path/filepath" - "strconv" - "strings" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend/dockerfile/instructions" @@ -46,7 +43,7 @@ func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool { return false } -func setCacheUIDGIDFileOp(m *instructions.Mount, st llb.State) llb.State { +func setCacheUIDGID(m *instructions.Mount, st llb.State) llb.State { uid := 0 gid := 0 mode := os.FileMode(0755) @@ -62,24 +59,6 @@ func setCacheUIDGIDFileOp(m *instructions.Mount, st llb.State) llb.State { return st.File(llb.Mkdir("/cache", mode, llb.WithUIDGID(uid, gid)), llb.WithCustomName("[internal] settings cache mount permissions")) } -func setCacheUIDGID(m *instructions.Mount, st llb.State, fileop bool) llb.State { - if fileop { - return setCacheUIDGIDFileOp(m, st) - } - - var b strings.Builder - if m.UID != nil { - b.WriteString(fmt.Sprintf("chown %d /mnt/cache;", *m.UID)) - } - if m.GID != nil { - b.WriteString(fmt.Sprintf("chown :%d /mnt/cache;", *m.GID)) - } - if m.Mode != nil { - b.WriteString(fmt.Sprintf("chmod %s /mnt/cache;", strconv.FormatUint(*m.Mode, 8))) - } - return llb.Image("busybox").Run(llb.Shlex(fmt.Sprintf("sh -c 'mkdir -p /mnt/cache;%s'", b.String())), llb.WithCustomName("[internal] settings cache mount permissions")).AddMount("/mnt", st) -} - func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) { var out []llb.RunOption mounts := instructions.GetMounts(c) @@ -100,7 +79,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* )) } if mount.Type == instructions.MountTypeSecret { - secret, err := dispatchSecret(mount) + secret, err := dispatchSecret(d, mount, c.Location()) if err != nil { return nil, err } @@ -108,7 +87,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* continue } if mount.Type == instructions.MountTypeSSH { - ssh, err := dispatchSSH(mount) + ssh, err := dispatchSSH(d, mount, c.Location()) if err != nil { return nil, err } @@ -148,7 +127,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* mountOpts = append(mountOpts, llb.SourcePath(src)) } else { if mount.UID != nil || mount.GID != nil || mount.Mode != nil { - st = setCacheUIDGID(mount, st, useFileOp(opt.buildArgValues, opt.llbCaps)) + st = 
setCacheUIDGID(mount, st) mountOpts = append(mountOpts, llb.SourcePath("/cache")) } } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go index 2c88a5e4f7..ced2bff1b0 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go @@ -5,10 +5,11 @@ import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/pkg/errors" ) -func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { +func dispatchSecret(d *dispatchState, m *instructions.Mount, loc []parser.Range) (llb.RunOption, error) { id := m.CacheID if m.Source != "" { id = m.Source @@ -26,6 +27,13 @@ func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { target = "/run/secrets/" + path.Base(id) } + if _, ok := d.outline.secrets[id]; !ok { + d.outline.secrets[id] = secretInfo{ + location: loc, + required: m.Required, + } + } + opts := []llb.SecretOption{llb.SecretID(id)} if !m.Required { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go index b55659d978..ab7aaa6012 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go @@ -3,13 +3,26 @@ package dockerfile2llb import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/pkg/errors" ) -func dispatchSSH(m *instructions.Mount) (llb.RunOption, error) { +func dispatchSSH(d *dispatchState, m *instructions.Mount, loc []parser.Range) (llb.RunOption, error) { if m.Source != "" { return nil, errors.Errorf("ssh does not support source") } + + id := m.CacheID + if id == "" { + id = "default" + } + if _, ok := d.outline.ssh[id]; !ok { + d.outline.ssh[id] = sshInfo{ + location: loc, + required: m.Required, + } + } + opts := []llb.SSHOption{llb.SSHID(m.CacheID)} if m.Target != "" { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go deleted file mode 100644 index 3cf982b9a9..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/directives.go +++ /dev/null @@ -1,55 +0,0 @@ -package dockerfile2llb - -import ( - "bufio" - "io" - "regexp" - "strings" - - "github.com/moby/buildkit/frontend/dockerfile/parser" -) - -const keySyntax = "syntax" - -var reDirective = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) - -type Directive struct { - Name string - Value string - Location []parser.Range -} - -func DetectSyntax(r io.Reader) (string, string, []parser.Range, bool) { - directives := ParseDirectives(r) - if len(directives) == 0 { - return "", "", nil, false - } - v, ok := directives[keySyntax] - if !ok { - return "", "", nil, false - } - p := strings.SplitN(v.Value, " ", 2) - return p[0], v.Value, v.Location, true -} - -func ParseDirectives(r io.Reader) map[string]Directive { - m := map[string]Directive{} - s := bufio.NewScanner(r) - var l int - for s.Scan() { - l++ - match := 
reDirective.FindStringSubmatch(s.Text()) - if len(match) == 0 { - return m - } - m[strings.ToLower(match[1])] = Directive{ - Name: match[1], - Value: match[2], - Location: []parser.Range{{ - Start: parser.Position{Line: l}, - End: parser.Position{Line: l}, - }}, - } - } - return m -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go index d4c82700e3..36b27aa28a 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go @@ -1,59 +1,14 @@ package dockerfile2llb import ( - "time" - - "github.com/docker/docker/api/types/strslice" + "github.com/moby/buildkit/exporter/containerimage/image" "github.com/moby/buildkit/util/system" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// ImageConfig is a docker compatible config for an image -type ImageConfig struct { - ocispecs.ImageConfig - - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - - // NetworkDisabled bool `json:",omitempty"` // Is network disabled - // MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - // Image is the JSON structure which describes some basic information about the image. // This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. -type Image struct { - ocispecs.Image - - // Config defines the execution parameters which should be used as a base when running a container using the image. - Config ImageConfig `json:"config,omitempty"` - - // Variant defines platform variant. To be added to OCI. 
- Variant string `json:"variant,omitempty"` -} +type Image image.Image func clone(src Image) Image { img := src @@ -69,8 +24,8 @@ func emptyImage(platform ocispecs.Platform) Image { Image: ocispecs.Image{ Architecture: platform.Architecture, OS: platform.OS, + Variant: platform.Variant, }, - Variant: platform.Variant, } img.RootFS.Type = "layers" img.Config.WorkingDir = "/" diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/outline.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/outline.go new file mode 100644 index 0000000000..f93c8961b2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/outline.go @@ -0,0 +1,210 @@ +package dockerfile2llb + +import ( + "sort" + + "github.com/moby/buildkit/frontend/dockerfile/instructions" + "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/frontend/subrequests/outline" + pb "github.com/moby/buildkit/solver/pb" +) + +type outlineCapture struct { + allArgs map[string]argInfo + usedArgs map[string]struct{} + secrets map[string]secretInfo + ssh map[string]sshInfo +} + +type argInfo struct { + value string + definition instructions.KeyValuePairOptional + deps map[string]struct{} + location []parser.Range +} + +type secretInfo struct { + required bool + location []parser.Range +} + +type sshInfo struct { + required bool + location []parser.Range +} + +func newOutlineCapture() outlineCapture { + return outlineCapture{ + allArgs: map[string]argInfo{}, + usedArgs: map[string]struct{}{}, + secrets: map[string]secretInfo{}, + ssh: map[string]sshInfo{}, + } +} + +func (o outlineCapture) clone() outlineCapture { + allArgs := map[string]argInfo{} + for k, v := range o.allArgs { + allArgs[k] = v + } + usedArgs := map[string]struct{}{} + for k := range o.usedArgs { + usedArgs[k] = struct{}{} + } + secrets := map[string]secretInfo{} + for k, v := range o.secrets { + secrets[k] = v + } + ssh := map[string]sshInfo{} + for k, v := range o.ssh { + ssh[k] = v + } + return outlineCapture{ + allArgs: allArgs, + usedArgs: usedArgs, + secrets: secrets, + ssh: ssh, + } +} + +func (o outlineCapture) markAllUsed(in map[string]struct{}) { + for k := range in { + if a, ok := o.allArgs[k]; ok { + o.markAllUsed(a.deps) + } + o.usedArgs[k] = struct{}{} + } +} + +func (ds *dispatchState) args(visited map[string]struct{}) []outline.Arg { + ds.outline.markAllUsed(ds.outline.usedArgs) + + args := make([]outline.Arg, 0, len(ds.outline.usedArgs)) + for k := range ds.outline.usedArgs { + if a, ok := ds.outline.allArgs[k]; ok { + if _, ok := visited[k]; !ok { + args = append(args, outline.Arg{ + Name: a.definition.Key, + Value: a.value, + Description: a.definition.Comment, + Location: toSourceLocation(a.location), + }) + visited[k] = struct{}{} + } + } + } + + if ds.base != nil { + args = append(args, ds.base.args(visited)...) + } + for d := range ds.deps { + args = append(args, d.args(visited)...) + } + + return args +} + +func (ds *dispatchState) secrets(visited map[string]struct{}) []outline.Secret { + secrets := make([]outline.Secret, 0, len(ds.outline.secrets)) + for k, v := range ds.outline.secrets { + if _, ok := visited[k]; !ok { + secrets = append(secrets, outline.Secret{ + Name: k, + Required: v.required, + Location: toSourceLocation(v.location), + }) + visited[k] = struct{}{} + } + } + if ds.base != nil { + secrets = append(secrets, ds.base.secrets(visited)...) + } + for d := range ds.deps { + secrets = append(secrets, d.secrets(visited)...) 
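+ // base and deps are walked too; the visited map keeps a secret that appears in several stages from being reported twice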
+ } + return secrets +} + +func (ds *dispatchState) ssh(visited map[string]struct{}) []outline.SSH { + ssh := make([]outline.SSH, 0, len(ds.outline.secrets)) + for k, v := range ds.outline.ssh { + if _, ok := visited[k]; !ok { + ssh = append(ssh, outline.SSH{ + Name: k, + Required: v.required, + Location: toSourceLocation(v.location), + }) + visited[k] = struct{}{} + } + } + if ds.base != nil { + ssh = append(ssh, ds.base.ssh(visited)...) + } + for d := range ds.deps { + ssh = append(ssh, d.ssh(visited)...) + } + return ssh +} + +func (ds *dispatchState) Outline(dt []byte) outline.Outline { + args := ds.args(map[string]struct{}{}) + sort.Slice(args, func(i, j int) bool { + return compLocation(args[i].Location, args[j].Location) + }) + + secrets := ds.secrets(map[string]struct{}{}) + sort.Slice(secrets, func(i, j int) bool { + return compLocation(secrets[i].Location, secrets[j].Location) + }) + + ssh := ds.ssh(map[string]struct{}{}) + sort.Slice(ssh, func(i, j int) bool { + return compLocation(ssh[i].Location, ssh[j].Location) + }) + + out := outline.Outline{ + Name: ds.stage.Name, + Description: ds.stage.Comment, + Sources: [][]byte{dt}, + Args: args, + Secrets: secrets, + SSH: ssh, + } + + return out +} + +func toSourceLocation(r []parser.Range) *pb.Location { + if len(r) == 0 { + return nil + } + arr := make([]*pb.Range, len(r)) + for i, r := range r { + arr[i] = &pb.Range{ + Start: pb.Position{ + Line: int32(r.Start.Line), + Character: int32(r.Start.Character), + }, + End: pb.Position{ + Line: int32(r.End.Line), + Character: int32(r.End.Character), + }, + } + } + return &pb.Location{Ranges: arr} +} + +func compLocation(a, b *pb.Location) bool { + if a.SourceIndex != b.SourceIndex { + return a.SourceIndex < b.SourceIndex + } + linea := 0 + lineb := 0 + if len(a.Ranges) > 0 { + linea = int(a.Ranges[0].Start.Line) + } + if len(b.Ranges) > 0 { + lineb = int(b.Ranges[0].Start.Line) + } + return linea < lineb +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerignore/dockerignore.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerignore/dockerignore.go index cc22381339..e7f29ae8df 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerignore/dockerignore.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerignore/dockerignore.go @@ -3,10 +3,11 @@ package dockerignore import ( "bufio" "bytes" - "fmt" "io" "path/filepath" "strings" + + "github.com/pkg/errors" ) // ReadAll reads a .dockerignore file and returns the list of file patterns @@ -58,7 +59,7 @@ func ReadAll(reader io.Reader) ([]string, error) { excludes = append(excludes, pattern) } if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("Error reading .dockerignore: %v", err) + return nil, errors.Wrap(err, "error reading .dockerignore") } return excludes, nil } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go index 1cfbf76000..a527175b73 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go @@ -1,10 +1,10 @@ package instructions import ( - "fmt" "strings" "github.com/moby/buildkit/util/suggest" + "github.com/pkg/errors" ) // FlagType is the type of the build flag @@ -88,7 +88,7 @@ func (bf *BFlags) AddStrings(name string) *Flag { // Note, any error will be generated when Parse() is called (see Parse). 
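For context, a minimal sketch of how these flags are consumed during instruction parsing, assuming instructions.NewBFlagsWithArgs is available; the flag names mirror ADD/COPY:

// Sketch: consuming BFlags the way instruction parsers do.
bf := instructions.NewBFlagsWithArgs([]string{"--chown=1000:1000", "--link"})
chown := bf.AddString("chown", "")
link := bf.AddBool("link", false)
if err := bf.Parse(); err != nil {
	return err // unknown or duplicate flags surface here, with suggestions
}
fmt.Println(chown.Value, link.IsTrue()) // 1000:1000 true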
func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { if _, ok := bf.flags[name]; ok { - bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) + bf.Err = errors.Errorf("Duplicate flag defined: %s", name) return nil } @@ -123,7 +123,8 @@ func (bf *BFlags) Used() []string { func (fl *Flag) IsTrue() bool { if fl.flagType != boolType { // Should never get here - panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) + err := errors.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name) + panic(err) } return fl.Value == "true" } @@ -134,19 +135,21 @@ func (fl *Flag) IsTrue() bool { // compile time error so it doesn't matter too much when we stop our // processing as long as we do stop it, so this allows the code // around AddXXX() to be just: -// defFlag := AddString("description", "") +// +// defFlag := AddString("description", "") +// // w/o needing to add an if-statement around each one. func (bf *BFlags) Parse() error { // If there was an error while defining the possible flags // go ahead and bubble it back up here since we didn't do it // earlier in the processing if bf.Err != nil { - return fmt.Errorf("error setting up flags: %s", bf.Err) + return errors.Wrap(bf.Err, "error setting up flags") } for _, arg := range bf.Args { if !strings.HasPrefix(arg, "--") { - return fmt.Errorf("arg should start with -- : %s", arg) + return errors.Errorf("arg should start with -- : %s", arg) } if arg == "--" { @@ -164,11 +167,12 @@ func (bf *BFlags) Parse() error { flag, ok := bf.flags[arg] if !ok { - return suggest.WrapError(fmt.Errorf("unknown flag: %s", arg), arg, allFlags(bf.flags), true) + err := errors.Errorf("unknown flag: %s", arg) + return suggest.WrapError(err, arg, allFlags(bf.flags), true) } if _, ok = bf.used[arg]; ok && flag.flagType != stringsType { - return fmt.Errorf("duplicate flag specified: %s", arg) + return errors.Errorf("duplicate flag specified: %s", arg) } bf.used[arg] = flag @@ -177,7 +181,7 @@ func (bf *BFlags) Parse() error { case boolType: // value == "" is only ok if no "=" was specified if index >= 0 && value == "" { - return fmt.Errorf("missing a value on flag: %s", arg) + return errors.Errorf("missing a value on flag: %s", arg) } lower := strings.ToLower(value) @@ -186,18 +190,18 @@ func (bf *BFlags) Parse() error { } else if lower == "true" || lower == "false" { flag.Value = lower } else { - return fmt.Errorf("expecting boolean value for flag %s, not: %s", arg, value) + return errors.Errorf("expecting boolean value for flag %s, not: %s", arg, value) } case stringType: if index < 0 { - return fmt.Errorf("missing a value on flag: %s", arg) + return errors.Errorf("missing a value on flag: %s", arg) } flag.Value = value case stringsType: if index < 0 { - return fmt.Errorf("missing a value on flag: %s", arg) + return errors.Errorf("missing a value on flag: %s", arg) } flag.StringValues = append(flag.StringValues, value) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go index 48ebf183a9..9ffbd457ab 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go @@ -9,7 +9,10 @@ import ( "github.com/pkg/errors" ) -// KeyValuePair represent an arbitrary named value (useful in slice instead of map[string] string to preserve ordering) +// KeyValuePair represents an arbitrary named value. 
+// +// This is useful for commands containing key-value maps that want to preserve +// the order of insertion, instead of map[string]string which does not. type KeyValuePair struct { Key string Value string @@ -19,13 +22,17 @@ func (kvp *KeyValuePair) String() string { return kvp.Key + "=" + kvp.Value } -// KeyValuePairOptional is the same as KeyValuePair but Value is optional +// KeyValuePairOptional is identical to KeyValuePair, but allows for optional values. type KeyValuePairOptional struct { Key string Value *string Comment string } +func (kvpo *KeyValuePairOptional) String() string { + return kvpo.Key + "=" + kvpo.ValueString() +} + func (kvpo *KeyValuePairOptional) ValueString() string { v := "" if kvpo.Value != nil { @@ -34,7 +41,11 @@ func (kvpo *KeyValuePairOptional) ValueString() string { return v } -// Command is implemented by every command present in a dockerfile +// Command interface is implemented by every possible command in a Dockerfile. +// +// The interface only exposes the minimal common elements shared between every +// command, while more detailed information per-command can be extracted using +// runtime type analysis, e.g. type-switches. type Command interface { Name() string Location() []parser.Range @@ -68,17 +79,18 @@ func newWithNameAndCode(req parseRequest) withNameAndCode { return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command, location: req.location} } -// SingleWordExpander is a provider for variable expansion where 1 word => 1 output +// SingleWordExpander is a provider for variable expansion where a single word +// corresponds to a single output. type SingleWordExpander func(word string) (string, error) -// SupportsSingleWordExpansion interface marks a command as supporting variable -// expansion +// SupportsSingleWordExpansion interface allows a command to support variable. type SupportsSingleWordExpansion interface { Expand(expander SingleWordExpander) error } -// SupportsSingleWordExpansionRaw interface marks a command as supporting -// variable expansion, while ensuring that quotes are preserved +// SupportsSingleWordExpansionRaw interface allows a command to support +// variable expansion, while ensuring that minimal transformations are applied +// during expansion, so that quotes and other special characters are preserved. type SupportsSingleWordExpansionRaw interface { ExpandRaw(expander SingleWordExpander) error } @@ -121,18 +133,22 @@ func expandSliceInPlace(values []string, expander SingleWordExpander) error { return nil } -// EnvCommand : ENV key1 value1 [keyN valueN...] +// EnvCommand allows setting an variable in the container's environment. +// +// ENV key1 value1 [keyN valueN...] type EnvCommand struct { withNameAndCode - Env KeyValuePairs // kvp slice instead of map to preserve ordering + Env KeyValuePairs } -// Expand variables func (c *EnvCommand) Expand(expander SingleWordExpander) error { return expandKvpsInPlace(c.Env, expander) } -// MaintainerCommand : MAINTAINER maintainer_name +// MaintainerCommand (deprecated) allows specifying a maintainer details for +// the image. 
+// +// MAINTAINER maintainer_name type MaintainerCommand struct { withNameAndCode Maintainer string @@ -154,17 +170,15 @@ func NewLabelCommand(k string, v string, NoExp bool) *LabelCommand { return cmd } -// LabelCommand : LABEL some json data describing the image -// -// Sets the Label variable foo to bar, +// LabelCommand sets an image label in the output // +// LABEL some json data describing the image type LabelCommand struct { withNameAndCode - Labels KeyValuePairs // kvp slice instead of map to preserve ordering + Labels KeyValuePairs noExpand bool } -// Expand variables func (c *LabelCommand) Expand(expander SingleWordExpander) error { if c.noExpand { return nil @@ -174,16 +188,16 @@ func (c *LabelCommand) Expand(expander SingleWordExpander) error { // SourceContent represents an anonymous file object type SourceContent struct { - Path string - Data string - Expand bool + Path string // path to the file + Data string // string content from the file + Expand bool // whether to expand file contents } // SourcesAndDest represent a collection of sources and a destination type SourcesAndDest struct { - DestPath string - SourcePaths []string - SourceContents []SourceContent + DestPath string // destination to write output + SourcePaths []string // file path sources + SourceContents []SourceContent // anonymous file sources } func (s *SourcesAndDest) Expand(expander SingleWordExpander) error { @@ -216,20 +230,22 @@ func (s *SourcesAndDest) ExpandRaw(expander SingleWordExpander) error { return nil } -// AddCommand : ADD foo /path +// AddCommand adds files from the provided sources to the target destination. // -// Add the file 'foo' to '/path'. Tarball and Remote URL (http, https) handling -// exist here. If you do not wish to have this automatic handling, use COPY. +// ADD foo /path // +// ADD supports tarball and remote URL handling, which may not always be +// desired - if you do not wish to have this automatic handling, use COPY. type AddCommand struct { withNameAndCode SourcesAndDest - Chown string - Chmod string - Link bool + Chown string + Chmod string + Link bool + KeepGitDir bool // whether to keep .git dir, only meaningful for git sources + Checksum string } -// Expand variables func (c *AddCommand) Expand(expander SingleWordExpander) error { expandedChown, err := expander(c.Chown) if err != nil { @@ -237,13 +253,20 @@ func (c *AddCommand) Expand(expander SingleWordExpander) error { } c.Chown = expandedChown + expandedChecksum, err := expander(c.Checksum) + if err != nil { + return err + } + c.Checksum = expandedChecksum + return c.SourcesAndDest.Expand(expander) } -// CopyCommand : COPY foo /path +// CopyCommand copies files from the provided sources to the target destination. // -// Same as 'ADD' but without the tar and remote url handling. +// COPY foo /path // +// Same as 'ADD' but without the magic additional tarball and remote URL handling. type CopyCommand struct { withNameAndCode SourcesAndDest @@ -253,7 +276,6 @@ type CopyCommand struct { Link bool } -// Expand variables func (c *CopyCommand) Expand(expander SingleWordExpander) error { expandedChown, err := expander(c.Chown) if err != nil { @@ -264,22 +286,24 @@ func (c *CopyCommand) Expand(expander SingleWordExpander) error { return c.SourcesAndDest.Expand(expander) } -// OnbuildCommand : ONBUILD +// OnbuildCommand allows specifying a command to be run on builds the use the +// resulting build image as a base image. 
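The Checksum field above is expanded like Chown and then validated in dispatchCopy; a minimal sketch of the LLB it yields for ADD --checksum, using github.com/opencontainers/go-digest and an illustrative digest and URL:

// Sketch: the checksum-pinned HTTP source built for ADD --checksum=<digest> <url>.
sum, err := digest.Parse("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
if err != nil {
	return err
}
st := llb.HTTP("https://example.com/archive.tar.gz",
	llb.Filename("archive.tar.gz"),
	llb.Checksum(sum), // the solver rejects content whose digest does not match
)
_ = st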
-// OnbuildCommand : ONBUILD
+// OnbuildCommand allows specifying a command to be run on builds that use the
+// resulting build image as a base image.
+//
+// ONBUILD
 type OnbuildCommand struct {
 	withNameAndCode
 	Expression string
 }
 
-// WorkdirCommand : WORKDIR /tmp
-//
-// Set the working directory for future RUN/CMD/etc statements.
+// WorkdirCommand sets the current working directory for all future commands in
+// the stage.
+//
+// WORKDIR /tmp
 type WorkdirCommand struct {
 	withNameAndCode
 	Path string
 }
 
-// Expand variables
 func (c *WorkdirCommand) Expand(expander SingleWordExpander) error {
 	p, err := expander(c.Path)
 	if err != nil {
@@ -303,16 +327,13 @@ type ShellDependantCmdLine struct {
 	PrependShell bool
 }
 
-// RunCommand : RUN some command yo
+// RunCommand runs a command.
 //
-// run a command and commit the image. Args are automatically prepended with
-// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under
-// Windows, in the event there is only one argument The difference in processing:
+// RUN "echo hi" # sh -c "echo hi"
 //
-// RUN echo hi # sh -c echo hi (Linux)
-// RUN echo hi # cmd /S /C echo hi (Windows)
-// RUN [ "echo", "hi" ] # echo hi
+// or
 //
+// RUN ["echo", "hi"] # echo hi
 type RunCommand struct {
 	withNameAndCode
 	withExternalData
@@ -327,60 +348,54 @@ func (c *RunCommand) Expand(expander SingleWordExpander) error {
 	return nil
 }
 
-// CmdCommand : CMD foo
+// CmdCommand sets the default command to run in the container on start.
 //
-// Set the default command to run in the container (which may be empty).
-// Argument handling is the same as RUN.
+// CMD "echo hi" # sh -c "echo hi"
 //
+// or
+//
+// CMD ["echo", "hi"] # echo hi
 type CmdCommand struct {
 	withNameAndCode
 	ShellDependantCmdLine
 }
 
-// HealthCheckCommand : HEALTHCHECK foo
-//
-// Set the default healthcheck command to run in the container (which may be empty).
-// Argument handling is the same as RUN.
+// HealthCheckCommand sets the default healthcheck command to run in the container.
 //
+// HEALTHCHECK
 type HealthCheckCommand struct {
 	withNameAndCode
 	Health *container.HealthConfig
 }
 
-// EntrypointCommand : ENTRYPOINT /usr/sbin/nginx
+// EntrypointCommand sets the default entrypoint of the container to use the
+// provided command.
 //
-// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments
-// to /usr/sbin/nginx. Uses the default shell if not in JSON format.
-//
-// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint
-// is initialized at newBuilder time instead of through argument parsing.
+// ENTRYPOINT /usr/sbin/nginx
 //
+// Entrypoint uses the default shell if not in JSON format.
 type EntrypointCommand struct {
 	withNameAndCode
 	ShellDependantCmdLine
 }
 
-// ExposeCommand : EXPOSE 6667/tcp 7000/tcp
-//
-// Expose ports for links and port mappings. This all ends up in
-// req.runConfig.ExposedPorts for runconfig.
+// ExposeCommand marks a container port that can be exposed at runtime.
 //
+// EXPOSE 6667/tcp 7000/tcp
 type ExposeCommand struct {
 	withNameAndCode
 	Ports []string
 }
 
-// UserCommand : USER foo
-//
-// Set the user to 'foo' for future commands and when running the
-// ENTRYPOINT/CMD at container run time.
+// UserCommand sets the user for the rest of the stage, and when starting the
+// container at run-time.
 //
+// USER user
 type UserCommand struct {
 	withNameAndCode
 	User string
 }
 
-// Expand variables
 func (c *UserCommand) Expand(expander SingleWordExpander) error {
 	p, err := expander(c.User)
 	if err != nil {
@@ -390,29 +405,26 @@ func (c *UserCommand) Expand(expander SingleWordExpander) error {
 	return nil
 }
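Note how shell form and exec form are distinguished: both land in CmdLine, and ShellDependantCmdLine.PrependShell records whether the builder should wrap the words with the current SHELL. A small sketch (not part of this patch) under the same import assumptions as the previous examples:

package main

import (
	"fmt"
	"strings"

	"github.com/moby/buildkit/frontend/dockerfile/instructions"
	"github.com/moby/buildkit/frontend/dockerfile/parser"
)

func main() {
	dockerfile := "FROM alpine\nRUN echo hi\nRUN [\"echo\", \"hi\"]\n"
	res, err := parser.Parse(strings.NewReader(dockerfile))
	if err != nil {
		panic(err)
	}
	stages, _, err := instructions.Parse(res.AST)
	if err != nil {
		panic(err)
	}
	for _, c := range stages[0].Commands {
		if run, ok := c.(*instructions.RunCommand); ok {
			// Shell form: CmdLine=["echo hi"], PrependShell=true.
			// Exec form:  CmdLine=["echo", "hi"], PrependShell=false.
			fmt.Println(run.CmdLine, "prependShell:", run.PrependShell)
		}
	}
}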
-// VolumeCommand : VOLUME /foo
-//
-// Expose the volume /foo for use. Will also accept the JSON array form.
+// VolumeCommand exposes the specified volume for use in the build environment.
 //
+// VOLUME /foo
 type VolumeCommand struct {
 	withNameAndCode
 	Volumes []string
 }
 
-// Expand variables
 func (c *VolumeCommand) Expand(expander SingleWordExpander) error {
 	return expandSliceInPlace(c.Volumes, expander)
 }
 
-// StopSignalCommand : STOPSIGNAL signal
+// StopSignalCommand sets the signal that will be used to kill the container.
 //
-// Set the signal that will be used to kill the container.
+// STOPSIGNAL signal
 type StopSignalCommand struct {
 	withNameAndCode
 	Signal string
 }
 
-// Expand variables
 func (c *StopSignalCommand) Expand(expander SingleWordExpander) error {
 	p, err := expander(c.Signal)
 	if err != nil {
@@ -430,17 +442,16 @@ func (c *StopSignalCommand) CheckPlatform(platform string) error {
 	return nil
 }
 
-// ArgCommand : ARG name[=value]
+// ArgCommand adds the specified variable to the list of variables that can be
+// passed to the builder using the --build-arg flag for expansion and
+// substitution.
 //
-// Adds the variable foo to the trusted list of variables that can be passed
-// to builder using the --build-arg flag for expansion/substitution or passing to 'run'.
-// Dockerfile author may optionally set a default value of this variable.
+// ARG name[=value]
 type ArgCommand struct {
 	withNameAndCode
 	Args []KeyValuePairOptional
 }
 
-// Expand variables
 func (c *ArgCommand) Expand(expander SingleWordExpander) error {
 	for i, v := range c.Args {
 		p, err := expander(v.Key)
@@ -460,32 +471,42 @@ func (c *ArgCommand) Expand(expander SingleWordExpander) error {
 	return nil
 }
 
-// ShellCommand : SHELL powershell -command
+// ShellCommand sets a custom shell to use.
 //
-// Set the non-default shell to use.
+// SHELL bash -e -c
 type ShellCommand struct {
 	withNameAndCode
 	Shell strslice.StrSlice
 }
 
-// Stage represents a single stage in a multi-stage build
+// Stage represents a bundled collection of commands.
+//
+// Each stage begins with a FROM command (which is consumed into the Stage),
+// indicating the source or stage to derive from, and ends either at the
+// end of the file, or the start of the next stage.
+//
+// Stages can be named, and can be additionally configured to use a specific
+// platform, in the case of a multi-arch base image.
 type Stage struct {
-	Name       string
-	Commands   []Command
-	BaseName   string
-	SourceCode string
-	Platform   string
-	Location   []parser.Range
-	Comment    string
+	Name     string    // name of the stage
+	Commands []Command // commands contained within the stage
+	BaseName string    // name of the base stage or source
+	Platform string    // platform of base source to use
+
+	Comment string // doc-comment directly above the stage
+
+	SourceCode string         // contents of the defining FROM command
+	Location   []parser.Range // location of the defining FROM command
 }
 
-// AddCommand to the stage
+// AddCommand appends a command to the stage.
 func (s *Stage) AddCommand(cmd Command) {
 	// todo: validate cmd type
 	s.Commands = append(s.Commands, cmd)
 }
 
-// IsCurrentStage check if the stage name is the current stage
+// IsCurrentStage returns true if the provided stage name is the name of the
+// current stage, and false otherwise.
 func IsCurrentStage(s []Stage, name string) bool {
 	if len(s) == 0 {
 		return false
@@ -493,7 +514,7 @@ func IsCurrentStage(s []Stage, name string) bool {
 	return s[len(s)-1].Name == name
 }
 
-// CurrentStage return the last stage in a slice
+// CurrentStage returns the last stage from a list of stages.
func CurrentStage(s []Stage) (*Stage, error) { if len(s) == 0 { return nil, errors.New("no build stage in current context") @@ -501,7 +522,7 @@ func CurrentStage(s []Stage) (*Stage, error) { return &s[len(s)-1], nil } -// HasStage looks for the presence of a given stage name +// HasStage looks for the presence of a given stage name from a list of stages. func HasStage(s []Stage, name string) (int, bool) { for i, stage := range s { // Stage name is case-insensitive by design diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go index 517ded7d67..e328b27bc7 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - dockeropts "github.com/docker/docker/opts" + "github.com/docker/go-units" "github.com/moby/buildkit/util/suggest" "github.com/pkg/errors" ) @@ -231,11 +231,10 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } case "size": if m.Type == "tmpfs" { - tmpfsSize := new(dockeropts.MemBytes) - if err := tmpfsSize.Set(value); err != nil { + m.SizeLimit, err = units.RAMInBytes(value) + if err != nil { return nil, errors.Errorf("invalid value for %s: %s", key, value) } - m.SizeLimit = tmpfsSize.Value() } else { return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go index 610aed7cc0..7f1eaa5deb 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go @@ -3,8 +3,8 @@ package instructions -import "fmt" +import "github.com/pkg/errors" func errNotJSON(command, _ string) error { - return fmt.Errorf("%s requires the arguments to be in JSON form", command) + return errors.Errorf("%s requires the arguments to be in JSON form", command) } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go index a4843c5b6a..1eec9d126c 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_windows.go @@ -5,6 +5,8 @@ import ( "path/filepath" "regexp" "strings" + + "github.com/pkg/errors" ) func errNotJSON(command, original string) error { @@ -23,5 +25,5 @@ func errNotJSON(command, original string) error { strings.Contains(original, "]") { extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. 
JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) } - return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) + return errors.Errorf("%s requires the arguments to be in JSON form%s", command, extra) } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go index d3b7326ce2..6c362fc6fa 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go @@ -1,3 +1,7 @@ +// The instructions package contains the definitions of the high-level +// Dockerfile commands, as well as low-level primitives for extracting these +// commands from a pre-parsed Abstract Syntax Tree. + package instructions import ( @@ -37,7 +41,7 @@ func nodeArgs(node *parser.Node) []string { if len(arg.Children) == 0 { result = append(result, arg.Value) } else if len(arg.Children) == 1 { - //sub command + // sub command result = append(result, arg.Children[0].Value) result = append(result, nodeArgs(arg.Children[0])...) } @@ -281,6 +285,8 @@ func parseAdd(req parseRequest) (*AddCommand, error) { flChown := req.flags.AddString("chown", "") flChmod := req.flags.AddString("chmod", "") flLink := req.flags.AddBool("link", false) + flKeepGitDir := req.flags.AddBool("keep-git-dir", false) + flChecksum := req.flags.AddString("checksum", "") if err := req.flags.Parse(); err != nil { return nil, err } @@ -296,6 +302,8 @@ func parseAdd(req parseRequest) (*AddCommand, error) { Chown: flChown.Value, Chmod: flChmod.Value, Link: flLink.Value == "true", + KeepGitDir: flKeepGitDir.Value == "true", + Checksum: flChecksum.Value, }, nil } @@ -377,7 +385,7 @@ func parseOnBuild(req parseRequest) (*OnbuildCommand, error) { case "ONBUILD": return nil, errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") case "MAINTAINER", "FROM": - return nil, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) + return nil, errors.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) } original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "") @@ -503,8 +511,11 @@ func parseOptInterval(f *Flag) (time.Duration, error) { if err != nil { return 0, err } + if d == 0 { + return 0, nil + } if d < container.MinimumDuration { - return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration) + return 0, errors.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration) } return d, nil } @@ -551,7 +562,7 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) default: - return nil, fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) + return nil, errors.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) } interval, err := parseOptInterval(flInterval) @@ -577,8 +588,8 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { if err != nil { return nil, err } - if retries < 1 { - return nil, fmt.Errorf("--retries must be at least 1 (not %d)", retries) + if retries < 0 { + return nil, errors.Errorf("--retries cannot be negative (%d)", retries) } healthcheck.Retries = int(retries) } else { @@ -725,7 +736,7 @@ func errExactlyOneArgument(command string) error { } func errNoDestinationArgument(command string) error { - return errors.Errorf("%s 
requires at least two arguments, but only one was provided. Destination could not be determined.", command)
+	return errors.Errorf("%s requires at least two arguments, but only one was provided. Destination could not be determined", command)
 }
 
 func errBadHeredoc(command string, option string) error {
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go
new file mode 100644
index 0000000000..db1668f252
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go
@@ -0,0 +1,171 @@
+package parser
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	keySyntax = "syntax"
+	keyEscape = "escape"
+)
+
+var validDirectives = map[string]struct{}{
+	keySyntax: {},
+	keyEscape: {},
+}
+
+type Directive struct {
+	Name     string
+	Value    string
+	Location []Range
+}
+
+// DirectiveParser is a parser for Dockerfile directives that enforces the
+// quirks of the directive parser.
+type DirectiveParser struct {
+	line   int
+	regexp *regexp.Regexp
+	seen   map[string]struct{}
+	done   bool
+}
+
+func (d *DirectiveParser) setComment(comment string) {
+	d.regexp = regexp.MustCompile(fmt.Sprintf(`^%s\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`, comment))
+}
+
+func (d *DirectiveParser) ParseLine(line []byte) (*Directive, error) {
+	d.line++
+	if d.done {
+		return nil, nil
+	}
+	if d.regexp == nil {
+		d.setComment("#")
+	}
+
+	match := d.regexp.FindSubmatch(line)
+	if len(match) == 0 {
+		d.done = true
+		return nil, nil
+	}
+
+	k := strings.ToLower(string(match[1]))
+	if _, ok := validDirectives[k]; !ok {
+		d.done = true
+		return nil, nil
+	}
+	if d.seen == nil {
+		d.seen = map[string]struct{}{}
+	}
+	if _, ok := d.seen[k]; ok {
+		return nil, errors.Errorf("only one %s parser directive can be used", k)
+	}
+	d.seen[k] = struct{}{}
+
+	v := string(match[2])
+
+	directive := Directive{
+		Name:  k,
+		Value: v,
+		Location: []Range{{
+			Start: Position{Line: d.line},
+			End:   Position{Line: d.line},
+		}},
+	}
+	return &directive, nil
+}
+
+func (d *DirectiveParser) ParseAll(data []byte) ([]*Directive, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	var directives []*Directive
+	for scanner.Scan() {
+		if d.done {
+			break
+		}
+
+		d, err := d.ParseLine(scanner.Bytes())
+		if err != nil {
+			return directives, err
+		}
+		if d != nil {
+			directives = append(directives, d)
+		}
+	}
+	return directives, nil
+}
+
+// DetectSyntax returns the syntax of the provided input.
+//
+// The traditional Dockerfile directives '# syntax = ...' are used by default,
+// however, the function will also fall back to C-style directives '// syntax = ...'
+// and JSON-encoded directives '{ "syntax": "..." }'. Finally, a leading line
+// starting with '#!' is treated as a shebang and ignored.
+//
+// This allows for a flexible range of input formats, and appropriate syntax
+// selection.
+func DetectSyntax(dt []byte) (string, string, []Range, bool) { + dt, hadShebang, err := discardShebang(dt) + if err != nil { + return "", "", nil, false + } + line := 0 + if hadShebang { + line++ + } + + // use default directive parser, and search for #syntax= + directiveParser := DirectiveParser{line: line} + if syntax, cmdline, loc, ok := detectSyntaxFromParser(dt, directiveParser); ok { + return syntax, cmdline, loc, true + } + + // use directive with different comment prefix, and search for //syntax= + directiveParser = DirectiveParser{line: line} + directiveParser.setComment("//") + if syntax, cmdline, loc, ok := detectSyntaxFromParser(dt, directiveParser); ok { + return syntax, cmdline, loc, true + } + + // search for possible json directives + var directive struct { + Syntax string `json:"syntax"` + } + if err := json.Unmarshal(dt, &directive); err == nil { + if directive.Syntax != "" { + loc := []Range{{ + Start: Position{Line: line}, + End: Position{Line: line}, + }} + return directive.Syntax, directive.Syntax, loc, true + } + } + + return "", "", nil, false +} + +func detectSyntaxFromParser(dt []byte, parser DirectiveParser) (string, string, []Range, bool) { + directives, _ := parser.ParseAll(dt) + for _, d := range directives { + // check for syntax directive before erroring out, since the error + // might have occurred *after* the syntax directive + if d.Name == keySyntax { + p, _, _ := strings.Cut(d.Value, " ") + return p, d.Value, d.Location, true + } + } + return "", "", nil, false +} + +func discardShebang(dt []byte) ([]byte, bool, error) { + line, rest, _ := bytes.Cut(dt, []byte("\n")) + if bytes.HasPrefix(line, []byte("#!")) { + return rest, true, nil + } + return dt, false, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go index c0d0a55d12..db8d0bda23 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go @@ -8,7 +8,6 @@ package parser import ( "encoding/json" - "fmt" "strings" "unicode" "unicode/utf8" @@ -34,7 +33,6 @@ func parseIgnore(rest string, d *directives) (*Node, map[string]bool, error) { // statement with sub-statements. // // ONBUILD RUN foo bar -> (onbuild (run foo bar)) -// func parseSubCommand(rest string, d *directives) (*Node, map[string]bool, error) { if rest == "" { return nil, nil, nil @@ -154,7 +152,7 @@ func parseNameVal(rest string, key string, d *directives) (*Node, error) { if !strings.Contains(words[0], "=") { parts := reWhitespace.Split(rest, 2) if len(parts) < 2 { - return nil, fmt.Errorf(key + " must have two arguments") + return nil, errors.Errorf("%s must have two arguments", key) } return newKeyValueNode(parts[0], parts[1]), nil } @@ -163,7 +161,7 @@ func parseNameVal(rest string, key string, d *directives) (*Node, error) { var prevNode *Node for _, word := range words { if !strings.Contains(word, "=") { - return nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) + return nil, errors.Errorf("Syntax error - can't find = in %q. 
Must be of the form: name=value", word) } parts := strings.SplitN(word, "=", 2) @@ -274,7 +272,7 @@ func parseString(rest string, d *directives) (*Node, map[string]bool, error) { func parseJSON(rest string, d *directives) (*Node, map[string]bool, error) { rest = strings.TrimLeftFunc(rest, unicode.IsSpace) if !strings.HasPrefix(rest, "[") { - return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) + return nil, nil, errors.Errorf("Error parsing %q as a JSON array", rest) } var myJSON []interface{} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go index 53165e0a48..d6723635d4 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go @@ -1,4 +1,5 @@ -// Package parser implements a parser and parse tree dumper for Dockerfiles. +// The parser package implements a parser that transforms a raw byte-stream +// into a low-level Abstract Syntax Tree. package parser import ( @@ -27,7 +28,6 @@ import ( // This data structure is frankly pretty lousy for handling complex languages, // but lucky for us the Dockerfile isn't very complicated. This structure // works a little more effectively than a "proper" parse tree for our needs. -// type Node struct { Value string // actual content Next *Node // the next item in the current sexp @@ -115,7 +115,6 @@ type Heredoc struct { var ( dispatch map[string]func(string, *directives) (*Node, map[string]bool, error) reWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - reDirectives = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) reComment = regexp.MustCompile(`^#.*$`) reHeredoc = regexp.MustCompile(`^(\d*)<<(-?)([^<]*)$`) reLeadingTabs = regexp.MustCompile(`(?m)^\t+`) @@ -124,11 +123,6 @@ var ( // DefaultEscapeToken is the default escape token const DefaultEscapeToken = '\\' -var validDirectives = map[string]struct{}{ - "escape": {}, - "syntax": {}, -} - var ( // Directives allowed to contain heredocs heredocDirectives = map[string]bool{ @@ -143,13 +137,12 @@ var ( } ) -// directive is the structure used during a build run to hold the state of +// directives is the structure used during a build run to hold the state of // parsing directives. type directives struct { - escapeToken rune // Current escape token - lineContinuationRegex *regexp.Regexp // Current line continuation regex - done bool // Whether we are done looking for directives - seen map[string]struct{} // Whether the escape directive has been seen + parser DirectiveParser + escapeToken rune // Current escape token + lineContinuationRegex *regexp.Regexp // Current line continuation regex } // setEscapeToken sets the default token for escaping characters and as line- @@ -178,40 +171,19 @@ func (d *directives) setEscapeToken(s string) error { // Parser directives must precede any builder instruction or other comments, // and cannot be repeated. 
func (d *directives) possibleParserDirective(line string) error { - if d.done { - return nil + directive, err := d.parser.ParseLine([]byte(line)) + if err != nil { + return err } - - match := reDirectives.FindStringSubmatch(line) - if len(match) == 0 { - d.done = true - return nil + if directive != nil && directive.Name == keyEscape { + return d.setEscapeToken(directive.Value) } - - k := strings.ToLower(match[1]) - _, ok := validDirectives[k] - if !ok { - d.done = true - return nil - } - - if _, ok := d.seen[k]; ok { - return errors.Errorf("only one %s parser directive can be used", k) - } - d.seen[k] = struct{}{} - - if k == "escape" { - return d.setEscapeToken(match[2]) - } - return nil } // newDefaultDirectives returns a new directives structure with the default escapeToken token func newDefaultDirectives() *directives { - d := &directives{ - seen: map[string]struct{}{}, - } + d := &directives{} d.setEscapeToken(string(DefaultEscapeToken)) return d } @@ -274,13 +246,15 @@ func newNodeFromLine(line string, d *directives, comments []string) (*Node, erro }, nil } -// Result is the result of parsing a Dockerfile +// Result contains the bundled outputs from parsing a Dockerfile. type Result struct { AST *Node EscapeToken rune Warnings []Warning } +// Warning contains information to identify and locate a warning generated +// during parsing. type Warning struct { Short string Detail [][]byte @@ -301,8 +275,8 @@ func (r *Result) PrintWarnings(out io.Writer) { } } -// Parse reads lines from a Reader, parses the lines into an AST and returns -// the AST and escape token +// Parse consumes lines from a provided Reader, parses each line into an AST +// and returns the results of doing so. func Parse(rwc io.Reader) (*Result, error) { d := newDefaultDirectives() currentLine := 0 @@ -421,7 +395,7 @@ func Parse(rwc io.Reader) (*Result, error) { }, withLocation(handleScannerError(scanner.Err()), currentLine, 0) } -// Extracts a heredoc from a possible heredoc regex match +// heredocFromMatch extracts a heredoc from a possible heredoc regex match. func heredocFromMatch(match []string) (*Heredoc, error) { if len(match) == 0 { return nil, nil @@ -457,7 +431,7 @@ func heredocFromMatch(match []string) (*Heredoc, error) { return nil, err } if len(wordsRaw) != len(words) { - return nil, fmt.Errorf("internal lexing of heredoc produced inconsistent results: %s", rest) + return nil, errors.Errorf("internal lexing of heredoc produced inconsistent results: %s", rest) } word := words[0] @@ -475,9 +449,14 @@ func heredocFromMatch(match []string) (*Heredoc, error) { }, nil } +// ParseHeredoc parses a heredoc word from a target string, returning the +// components from the doc. func ParseHeredoc(src string) (*Heredoc, error) { return heredocFromMatch(reHeredoc.FindStringSubmatch(src)) } + +// MustParseHeredoc is a variant of ParseHeredoc that discards the error, if +// there was one present. func MustParseHeredoc(src string) *Heredoc { heredoc, _ := ParseHeredoc(src) return heredoc @@ -503,6 +482,7 @@ func heredocsFromLine(line string) ([]Heredoc, error) { return docs, nil } +// ChompHeredocContent chomps leading tabs from the heredoc. 
func ChompHeredocContent(src string) string { return reLeadingTabs.ReplaceAllString(src, "") } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go index 23ab81f25c..b930ab3260 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go @@ -377,7 +377,7 @@ func (sw *shellWord) processDollar() (string, error) { } // Grab the current value of the variable in question so we - // can use to to determine what to do based on the modifier + // can use it to determine what to do based on the modifier newValue, found := sw.getEnv(name) switch modifier { diff --git a/vendor/github.com/moby/buildkit/frontend/frontend.go b/vendor/github.com/moby/buildkit/frontend/frontend.go index dedda54c61..024ac80204 100644 --- a/vendor/github.com/moby/buildkit/frontend/frontend.go +++ b/vendor/github.com/moby/buildkit/frontend/frontend.go @@ -6,10 +6,16 @@ import ( "github.com/moby/buildkit/client/llb" gw "github.com/moby/buildkit/frontend/gateway/client" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/solver/result" digest "github.com/opencontainers/go-digest" ) +type Result = result.Result[solver.ResultProxy] + +type Attestation = result.Attestation[solver.ResultProxy] + type Frontend interface { Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string, inputs map[string]*pb.Definition, sid string, sm *session.Manager) (*Result, error) } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/attestation.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/attestation.go new file mode 100644 index 0000000000..5ffe67233c --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/attestation.go @@ -0,0 +1,51 @@ +package client + +import ( + pb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/solver/result" + "github.com/pkg/errors" +) + +func AttestationToPB[T any](a *result.Attestation[T]) (*pb.Attestation, error) { + if a.ContentFunc != nil { + return nil, errors.Errorf("attestation callback cannot be sent through gateway") + } + + subjects := make([]*pb.InTotoSubject, len(a.InToto.Subjects)) + for i, subject := range a.InToto.Subjects { + subjects[i] = &pb.InTotoSubject{ + Kind: subject.Kind, + Name: subject.Name, + Digest: subject.Digest, + } + } + + return &pb.Attestation{ + Kind: a.Kind, + Metadata: a.Metadata, + Path: a.Path, + InTotoPredicateType: a.InToto.PredicateType, + InTotoSubjects: subjects, + }, nil +} + +func AttestationFromPB[T any](a *pb.Attestation) (*result.Attestation[T], error) { + subjects := make([]result.InTotoSubject, len(a.InTotoSubjects)) + for i, subject := range a.InTotoSubjects { + subjects[i] = result.InTotoSubject{ + Kind: subject.Kind, + Name: subject.Name, + Digest: subject.Digest, + } + } + + return &result.Attestation[T]{ + Kind: a.Kind, + Metadata: a.Metadata, + Path: a.Path, + InToto: result.InTotoAttestation{ + PredicateType: a.InTotoPredicateType, + Subjects: subjects, + }, + }, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go index 61bc018ff5..7b6b9de132 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go @@ -7,12 +7,24 @@ import 
( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/solver/result" + spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/apicaps" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" fstypes "github.com/tonistiigi/fsutil/types" ) +type Result = result.Result[Reference] + +type Attestation = result.Attestation[Reference] + +type BuildFunc func(context.Context, Client) (*Result, error) + +func NewResult() *Result { + return &Result{} +} + type Client interface { Solve(ctx context.Context, req SolveRequest) (*Result, error) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) @@ -64,6 +76,8 @@ type StartRequest struct { Stdin io.ReadCloser Stdout, Stderr io.WriteCloser SecurityMode pb.SecurityMode + + RemoveMountStubsRecursive bool } // WinSize is same as executor.WinSize, copied here to prevent circular package @@ -82,6 +96,7 @@ type ContainerProcess interface { type Reference interface { ToState() (llb.State, error) + Evaluate(ctx context.Context) error ReadFile(ctx context.Context, req ReadRequest) ([]byte, error) StatFile(ctx context.Context, req StatRequest) (*fstypes.Stat, error) ReadDir(ctx context.Context, req ReadDirRequest) ([]*fstypes.Stat, error) @@ -114,6 +129,7 @@ type SolveRequest struct { FrontendOpt map[string]string FrontendInputs map[string]*pb.Definition CacheImports []CacheOptionsEntry + SourcePolicies []*spb.Policy } type CacheOptionsEntry struct { diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go deleted file mode 100644 index bd54228478..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "context" - "sync" - - "github.com/pkg/errors" -) - -type BuildFunc func(context.Context, Client) (*Result, error) - -type Result struct { - mu sync.Mutex - Ref Reference - Refs map[string]Reference - Metadata map[string][]byte -} - -func NewResult() *Result { - return &Result{} -} - -func (r *Result) AddMeta(k string, v []byte) { - r.mu.Lock() - if r.Metadata == nil { - r.Metadata = map[string][]byte{} - } - r.Metadata[k] = v - r.mu.Unlock() -} - -func (r *Result) AddRef(k string, ref Reference) { - r.mu.Lock() - if r.Refs == nil { - r.Refs = map[string]Reference{} - } - r.Refs[k] = ref - r.mu.Unlock() -} - -func (r *Result) SetRef(ref Reference) { - r.Ref = ref -} - -func (r *Result) SingleRef() (Reference, error) { - r.mu.Lock() - defer r.mu.Unlock() - - if r.Refs != nil && r.Ref == nil { - return nil, errors.Errorf("invalid map result") - } - - return r.Ref, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/container.go b/vendor/github.com/moby/buildkit/frontend/gateway/container.go index 45cf2d90eb..d6161d1def 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/container.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/container.go @@ -298,14 +298,15 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques signal := make(chan syscall.Signal) procInfo := executor.ProcessInfo{ Meta: executor.Meta{ - Args: req.Args, - Env: req.Env, - User: req.User, - Cwd: req.Cwd, - Tty: req.Tty, - NetMode: gwCtr.netMode, - ExtraHosts: gwCtr.extraHosts, - SecurityMode: req.SecurityMode, + Args: req.Args, + Env: req.Env, + User: req.User, + Cwd: req.Cwd, + Tty: 
req.Tty, + NetMode: gwCtr.netMode, + ExtraHosts: gwCtr.extraHosts, + SecurityMode: req.SecurityMode, + RemoveMountStubsRecursive: req.RemoveMountStubsRecursive, }, Stdin: req.Stdin, Stdout: req.Stdout, diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go index 0a95de377d..e13894ba37 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go @@ -17,6 +17,7 @@ import ( "github.com/moby/buildkit/solver/errdefs" llberrdefs "github.com/moby/buildkit/solver/llbsolver/errdefs" opspb "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/solver/result" "github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" @@ -33,7 +34,6 @@ func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLL sid: sid, sm: sm, workers: w, - final: map[*ref]struct{}{}, workerRefByID: make(map[string]*worker.WorkerRef), } bc.buildOpts = bc.loadBuildOpts() @@ -45,7 +45,6 @@ type bridgeClient struct { mu sync.Mutex opts map[string]string inputs map[string]*opspb.Definition - final map[*ref]struct{} sid string sm *session.Manager refs []*ref @@ -63,31 +62,32 @@ func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*cli FrontendOpt: req.FrontendOpt, FrontendInputs: req.FrontendInputs, CacheImports: req.CacheImports, + SourcePolicies: req.SourcePolicies, }, c.sid) if err != nil { return nil, c.wrapSolveError(err) } + for _, atts := range res.Attestations { + for _, att := range atts { + if att.ContentFunc != nil { + return nil, errors.Errorf("attestation callback cannot be sent through gateway") + } + } + } - cRes := &client.Result{} c.mu.Lock() - for k, r := range res.Refs { + cRes, err := result.ConvertResult(res, func(r solver.ResultProxy) (client.Reference, error) { rr, err := c.newRef(r, session.NewGroup(c.sid)) if err != nil { return nil, err } c.refs = append(c.refs, rr) - cRes.AddRef(k, rr) - } - if r := res.Ref; r != nil { - rr, err := c.newRef(r, session.NewGroup(c.sid)) - if err != nil { - return nil, err - } - c.refs = append(c.refs, rr) - cRes.SetRef(rr) - } + return rr, nil + }) c.mu.Unlock() - cRes.Metadata = res.Metadata + if err != nil { + return nil, err + } return cRes, nil } @@ -185,30 +185,24 @@ func (c *bridgeClient) toFrontendResult(r *client.Result) (*frontend.Result, err if r == nil { return nil, nil } - - res := &frontend.Result{} - - if r.Refs != nil { - res.Refs = make(map[string]solver.ResultProxy, len(r.Refs)) - for k, r := range r.Refs { - rr, ok := r.(*ref) - if !ok { - return nil, errors.Errorf("invalid reference type for forward %T", r) + for _, atts := range r.Attestations { + for _, att := range atts { + if att.ContentFunc != nil { + return nil, errors.Errorf("attestation callback cannot be sent through gateway") } - c.final[rr] = struct{}{} - res.Refs[k] = rr.ResultProxy } } - if r := r.Ref; r != nil { + + res, err := result.ConvertResult(r, func(r client.Reference) (solver.ResultProxy, error) { rr, ok := r.(*ref) if !ok { return nil, errors.Errorf("invalid reference type for forward %T", r) } - c.final[rr] = struct{}{} - res.Ref = rr.ResultProxy + return rr.acquireResultProxy(), nil + }) + if err != nil { + return nil, err } - res.Metadata = r.Metadata - return res, nil } @@ -223,8 +217,11 @@ func (c *bridgeClient) discard(err error) { } for _, r := range c.refs { if r != nil { - 
if _, ok := c.final[r]; !ok || err != nil { - r.Release(context.TODO()) + r.resultProxy.Release(context.TODO()) + if err != nil { + for _, clone := range r.resultProxyClones { + clone.Release(context.TODO()) + } } } } @@ -253,7 +250,7 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return errors.Errorf("unexpected Ref type: %T", m.Ref) } - res, err := refProxy.Result(ctx) + res, err := refProxy.resultProxy.Result(ctx) if err != nil { return err } @@ -309,24 +306,41 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return ctr, nil } +func (c *bridgeClient) newRef(r solver.ResultProxy, s session.Group) (*ref, error) { + return &ref{resultProxy: r, session: s, c: c}, nil +} + type ref struct { - solver.ResultProxy + resultProxy solver.ResultProxy + resultProxyClones []solver.ResultProxy + session session.Group c *bridgeClient } -func (c *bridgeClient) newRef(r solver.ResultProxy, s session.Group) (*ref, error) { - return &ref{ResultProxy: r, session: s, c: c}, nil +func (r *ref) acquireResultProxy() solver.ResultProxy { + s1, s2 := solver.SplitResultProxy(r.resultProxy) + r.resultProxy = s1 + r.resultProxyClones = append(r.resultProxyClones, s2) + return s2 } func (r *ref) ToState() (st llb.State, err error) { - defop, err := llb.NewDefinitionOp(r.Definition()) + defop, err := llb.NewDefinitionOp(r.resultProxy.Definition()) if err != nil { return st, err } return llb.NewState(defop), nil } +func (r *ref) Evaluate(ctx context.Context) error { + _, err := r.resultProxy.Result(ctx) + if err != nil { + return r.c.wrapSolveError(err) + } + return nil +} + func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { m, err := r.getMountable(ctx) if err != nil { @@ -365,7 +379,7 @@ func (r *ref) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.St } func (r *ref) getMountable(ctx context.Context) (snapshot.Mountable, error) { - rr, err := r.ResultProxy.Result(ctx) + rr, err := r.resultProxy.Result(ctx) if err != nil { return nil, r.c.wrapSolveError(err) } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go index 85a42e299d..79825d0b65 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go @@ -39,7 +39,6 @@ import ( opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/util/bklog" - "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/grpcerrors" "github.com/moby/buildkit/util/stack" "github.com/moby/buildkit/util/tracing" @@ -226,10 +225,11 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten env = append(env, "BUILDKIT_EXPORTEDPRODUCT="+apicaps.ExportedProduct) meta := executor.Meta{ - Env: env, - Args: args, - Cwd: cwd, - ReadonlyRootFS: readonly, + Env: env, + Args: args, + Cwd: cwd, + ReadonlyRootFS: readonly, + RemoveMountStubsRecursive: true, } if v, ok := img.Config.Labels["moby.buildkit.frontend.network.none"]; ok { @@ -353,25 +353,19 @@ func (lbf *llbBridgeForwarder) Discard() { } for id, workerRef := range lbf.workerRefByID { - workerRef.ImmutableRef.Release(context.TODO()) + workerRef.Release(context.TODO()) delete(lbf.workerRefByID, id) } - for id, r := range lbf.refs { - if lbf.err == nil && lbf.result != nil { - keep := false - lbf.result.EachRef(func(r2 solver.ResultProxy) error { - if r == r2 { - keep = true 
- } - return nil - }) - if keep { - continue - } - } - r.Release(context.TODO()) - delete(lbf.refs, id) + if lbf.err != nil && lbf.result != nil { + lbf.result.EachRef(func(r solver.ResultProxy) error { + r.Release(context.TODO()) + return nil + }) } + for _, r := range lbf.refs { + r.Release(context.TODO()) + } + lbf.refs = map[string]solver.ResultProxy{} } func (lbf *llbBridgeForwarder) Done() <-chan struct{} { @@ -547,9 +541,14 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R } } dgst, dt, err := lbf.llbBridge.ResolveImageConfig(ctx, req.Ref, llb.ResolveImageConfigOpt{ - Platform: platform, - ResolveMode: req.ResolveMode, - LogName: req.LogName, + ResolverType: llb.ResolverType(req.ResolverType), + Platform: platform, + ResolveMode: req.ResolveMode, + LogName: req.LogName, + Store: llb.ResolveImageConfigOptStore{ + SessionID: req.SessionID, + StoreID: req.StoreID, + }, }) if err != nil { return nil, err @@ -560,20 +559,6 @@ func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.R }, nil } -func translateLegacySolveRequest(req *pb.SolveRequest) error { - // translates ImportCacheRefs to new CacheImports (v0.4.0) - for _, legacyImportRef := range req.ImportCacheRefsDeprecated { - im := &pb.CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{"ref": legacyImportRef}, - } - // FIXME(AkihiroSuda): skip append if already exists - req.CacheImports = append(req.CacheImports, im) - } - req.ImportCacheRefsDeprecated = nil - return nil -} - func (lbf *llbBridgeForwarder) wrapSolveError(solveErr error) error { var ( ee *llberrdefs.ExecError @@ -628,9 +613,6 @@ func (lbf *llbBridgeForwarder) registerResultIDs(results ...solver.Result) (ids } func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) (*pb.SolveResponse, error) { - if err := translateLegacySolveRequest(req); err != nil { - return nil, err - } var cacheImports []frontend.CacheOptionsEntry for _, e := range req.CacheImports { cacheImports = append(cacheImports, frontend.CacheOptionsEntry{ @@ -647,6 +629,7 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) FrontendOpt: req.FrontendOpt, FrontendInputs: req.FrontendInputs, CacheImports: cacheImports, + SourcePolicies: req.SourcePolicies, }, lbf.sid) if err != nil { return nil, lbf.wrapSolveError(err) @@ -663,6 +646,7 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) var defaultID string lbf.mu.Lock() + if res.Refs != nil { ids := make(map[string]string, len(res.Refs)) defs := make(map[string]*opspb.Definition, len(res.Refs)) @@ -671,16 +655,6 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) if ref == nil { id = "" } else { - dtbi, err := buildinfo.Encode(ctx, pbRes.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), ref.BuildSources()) - if err != nil { - return nil, err - } - if dtbi != nil && len(dtbi) > 0 { - if pbRes.Metadata == nil { - pbRes.Metadata = make(map[string][]byte) - } - pbRes.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k)] = dtbi - } lbf.refs[id] = ref } ids[k] = id @@ -704,16 +678,6 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) if ref == nil { id = "" } else { - dtbi, err := buildinfo.Encode(ctx, pbRes.Metadata, exptypes.ExporterBuildInfo, ref.BuildSources()) - if err != nil { - return nil, err - } - if dtbi != nil && len(dtbi) > 0 { - if pbRes.Metadata == nil { - pbRes.Metadata = make(map[string][]byte) - } - 
pbRes.Metadata[exptypes.ExporterBuildInfo] = dtbi - } def = ref.Definition() lbf.refs[id] = ref } @@ -725,6 +689,31 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: id} } } + + if res.Attestations != nil { + pbRes.Attestations = map[string]*pb.Attestations{} + for k, atts := range res.Attestations { + for _, att := range atts { + pbAtt, err := gwclient.AttestationToPB(&att) + if err != nil { + return nil, err + } + + if att.Ref != nil { + id := identity.NewID() + def := att.Ref.Definition() + lbf.refs[id] = att.Ref + pbAtt.Ref = &pb.Ref{Id: id, Def: def} + } + + if pbRes.Attestations[k] == nil { + pbRes.Attestations[k] = &pb.Attestations{} + } + pbRes.Attestations[k].Attestation = append(pbRes.Attestations[k].Attestation, pbAtt) + } + } + } + lbf.mu.Unlock() // compatibility mode for older clients @@ -757,15 +746,15 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) return resp, nil } -func (lbf *llbBridgeForwarder) getImmutableRef(ctx context.Context, id, path string) (cache.ImmutableRef, error) { +func (lbf *llbBridgeForwarder) getImmutableRef(ctx context.Context, id string) (cache.ImmutableRef, error) { lbf.mu.Lock() ref, ok := lbf.refs[id] lbf.mu.Unlock() if !ok { - return nil, errors.Errorf("no such ref: %v", id) + return nil, errors.Errorf("no such ref: %s", id) } if ref == nil { - return nil, errors.Wrap(os.ErrNotExist, path) + return nil, errors.Errorf("empty ref: %s", id) } r, err := ref.Result(ctx) @@ -784,7 +773,7 @@ func (lbf *llbBridgeForwarder) getImmutableRef(ctx context.Context, id, path str func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileRequest) (*pb.ReadFileResponse, error) { ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - ref, err := lbf.getImmutableRef(ctx, req.Ref, req.FilePath) + ref, err := lbf.getImmutableRef(ctx, req.Ref) if err != nil { return nil, err } @@ -799,9 +788,12 @@ func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileReq } } - m, err := ref.Mount(ctx, true, session.NewGroup(lbf.sid)) - if err != nil { - return nil, err + var m snapshot.Mountable + if ref != nil { + m, err = ref.Mount(ctx, true, session.NewGroup(lbf.sid)) + if err != nil { + return nil, err + } } dt, err := cacheutil.ReadFile(ctx, m, newReq) @@ -815,7 +807,7 @@ func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileReq func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirRequest) (*pb.ReadDirResponse, error) { ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - ref, err := lbf.getImmutableRef(ctx, req.Ref, req.DirPath) + ref, err := lbf.getImmutableRef(ctx, req.Ref) if err != nil { return nil, err } @@ -824,9 +816,12 @@ func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirReque Path: req.DirPath, IncludePattern: req.IncludePattern, } - m, err := ref.Mount(ctx, true, session.NewGroup(lbf.sid)) - if err != nil { - return nil, err + var m snapshot.Mountable + if ref != nil { + m, err = ref.Mount(ctx, true, session.NewGroup(lbf.sid)) + if err != nil { + return nil, err + } } entries, err := cacheutil.ReadDir(ctx, m, newReq) if err != nil { @@ -839,13 +834,16 @@ func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirReque func (lbf *llbBridgeForwarder) StatFile(ctx context.Context, req *pb.StatFileRequest) (*pb.StatFileResponse, error) { ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - ref, err := 
lbf.getImmutableRef(ctx, req.Ref, req.Path) + ref, err := lbf.getImmutableRef(ctx, req.Ref) if err != nil { return nil, err } - m, err := ref.Mount(ctx, true, session.NewGroup(lbf.sid)) - if err != nil { - return nil, err + var m snapshot.Mountable + if ref != nil { + m, err = ref.Mount(ctx, true, session.NewGroup(lbf.sid)) + if err != nil { + return nil, err + } } st, err := cacheutil.StatFile(ctx, m, req.Path) if err != nil { @@ -855,6 +853,16 @@ func (lbf *llbBridgeForwarder) StatFile(ctx context.Context, req *pb.StatFileReq return &pb.StatFileResponse{Stat: st}, nil } +func (lbf *llbBridgeForwarder) Evaluate(ctx context.Context, req *pb.EvaluateRequest) (*pb.EvaluateResponse, error) { + ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) + + _, err := lbf.getImmutableRef(ctx, req.Ref) + if err != nil { + return nil, err + } + return &pb.EvaluateResponse{}, nil +} + func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) { workers := lbf.workers.WorkerInfos() pbWorkers := make([]*apitypes.WorkerRecord, 0, len(workers)) @@ -887,38 +895,54 @@ func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest) switch res := in.Result.Result.(type) { case *pb.Result_RefDeprecated: - ref, err := lbf.convertRef(res.RefDeprecated) + ref, err := lbf.cloneRef(res.RefDeprecated) if err != nil { return nil, err } - r.Ref = ref + r.SetRef(ref) case *pb.Result_RefsDeprecated: - m := map[string]solver.ResultProxy{} for k, id := range res.RefsDeprecated.Refs { - ref, err := lbf.convertRef(id) + ref, err := lbf.cloneRef(id) if err != nil { return nil, err } - m[k] = ref + r.AddRef(k, ref) } - r.Refs = m case *pb.Result_Ref: - ref, err := lbf.convertRef(res.Ref.Id) + ref, err := lbf.cloneRef(res.Ref.Id) if err != nil { return nil, err } - r.Ref = ref + r.SetRef(ref) case *pb.Result_Refs: - m := map[string]solver.ResultProxy{} for k, ref := range res.Refs.Refs { - ref, err := lbf.convertRef(ref.Id) + ref, err := lbf.cloneRef(ref.Id) if err != nil { return nil, err } - m[k] = ref + r.AddRef(k, ref) } - r.Refs = m } + + if in.Result.Attestations != nil { + for k, pbAtts := range in.Result.Attestations { + for _, pbAtt := range pbAtts.Attestation { + att, err := gwclient.AttestationFromPB[solver.ResultProxy](pbAtt) + if err != nil { + return nil, err + } + if pbAtt.Ref != nil { + ref, err := lbf.cloneRef(pbAtt.Ref.Id) + if err != nil { + return nil, err + } + att.Ref = ref + } + r.AddAttestation(k, *att) + } + } + } + return lbf.setResult(r, nil) } @@ -1259,15 +1283,16 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e pios[pid] = pio proc, err := ctr.Start(initCtx, gwclient.StartRequest{ - Args: init.Meta.Args, - Env: init.Meta.Env, - User: init.Meta.User, - Cwd: init.Meta.Cwd, - Tty: init.Tty, - Stdin: pio.processReaders[0], - Stdout: pio.processWriters[1], - Stderr: pio.processWriters[2], - SecurityMode: init.Security, + Args: init.Meta.Args, + Env: init.Meta.Env, + User: init.Meta.User, + Cwd: init.Meta.Cwd, + Tty: init.Tty, + Stdin: pio.processReaders[0], + Stdout: pio.processWriters[1], + Stderr: pio.processWriters[2], + SecurityMode: init.Security, + RemoveMountStubsRecursive: init.Meta.RemoveMountStubsRecursive, }) if err != nil { return stack.Enable(err) @@ -1406,10 +1431,27 @@ func (lbf *llbBridgeForwarder) convertRef(id string) (solver.ResultProxy, error) if !ok { return nil, errors.Errorf("return reference %s not found", id) } - return r, nil } +func (lbf *llbBridgeForwarder) cloneRef(id string) 
(solver.ResultProxy, error) { + if id == "" { + return nil, nil + } + + lbf.mu.Lock() + defer lbf.mu.Unlock() + + r, ok := lbf.refs[id] + if !ok { + return nil, errors.Errorf("return reference %s not found", id) + } + + s1, s2 := solver.SplitResultProxy(r) + lbf.refs[id] = s1 + return s2, nil +} + func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { go func() { <-ctx.Done() diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go index d8e2799ff0..1b000a816e 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go @@ -115,7 +115,7 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro req := &pb.ReturnRequest{} if retError == nil { if res == nil { - res = &client.Result{} + res = client.NewResult() } pbRes := &pb.Result{ Metadata: res.Metadata, @@ -160,6 +160,31 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro } } } + + if res.Attestations != nil { + attestations := map[string]*pb.Attestations{} + for k, as := range res.Attestations { + for _, a := range as { + pbAtt, err := client.AttestationToPB(&a) + if err != nil { + retError = err + continue + } + pbRef, err := convertRef(a.Ref) + if err != nil { + retError = err + continue + } + pbAtt.Ref = pbRef + if attestations[k] == nil { + attestations[k] = &pb.Attestations{} + } + attestations[k].Attestation = append(attestations[k].Attestation, pbAtt) + } + } + pbRes.Attestations = attestations + } + if retError == nil { req.Result = pbRes } @@ -323,22 +348,12 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * } } } - var ( - // old API - legacyRegistryCacheImports []string - // new API (CapImportCaches) - cacheImports []*pb.CacheOptionsEntry - ) - supportCapImportCaches := c.caps.Supports(pb.CapImportCaches) == nil + var cacheImports []*pb.CacheOptionsEntry for _, im := range creq.CacheImports { - if !supportCapImportCaches && im.Type == "registry" { - legacyRegistryCacheImports = append(legacyRegistryCacheImports, im.Attrs["ref"]) - } else { - cacheImports = append(cacheImports, &pb.CacheOptionsEntry{ - Type: im.Type, - Attrs: im.Attrs, - }) - } + cacheImports = append(cacheImports, &pb.CacheOptionsEntry{ + Type: im.Type, + Attrs: im.Attrs, + }) } // these options are added by go client in solve() @@ -366,10 +381,8 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * FrontendInputs: creq.FrontendInputs, AllowResultReturn: true, AllowResultArrayRef: true, - // old API - ImportCacheRefsDeprecated: legacyRegistryCacheImports, - // new API - CacheImports: cacheImports, + CacheImports: cacheImports, + SourcePolicies: creq.SourcePolicies, } // backwards compatibility with inline return @@ -381,30 +394,15 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * if c.caps.Supports(pb.CapGatewayEvaluateSolve) == nil { req.Evaluate = creq.Evaluate } else { - // If evaluate is not supported, fallback to running Stat(".") in order to - // trigger an evaluation of the result. + // If evaluate is not supported, fallback to running Stat(".") in + // order to trigger an evaluation of the result. 
defer func() { if res == nil { return } - - var ( - id string - ref client.Reference - ) - ref, err = res.SingleRef() - if err != nil { - for refID := range res.Refs { - id = refID - break - } - } else { - id = ref.(*reference).id - } - - _, err = c.client.StatFile(ctx, &pb.StatFileRequest{ - Ref: id, - Path: ".", + err = res.EachRef(func(ref client.Reference) error { + _, err := ref.StatFile(ctx, client.StatRequest{Path: "."}) + return err }) }() } @@ -415,7 +413,7 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * return nil, err } - res = &client.Result{} + res = client.NewResult() if resp.Result == nil { if id := resp.Ref; id != "" { c.requests[id] = req @@ -456,6 +454,25 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * res.AddRef(k, ref) } } + + if resp.Result.Attestations != nil { + for p, as := range resp.Result.Attestations { + for _, a := range as.Attestation { + att, err := client.AttestationFromPB[client.Reference](a) + if err != nil { + return nil, err + } + if a.Ref.Id != "" { + ref, err := newReference(c, a.Ref) + if err != nil { + return nil, err + } + att.Ref = ref + } + res.AddAttestation(p, *att) + } + } + } } return res, nil @@ -472,7 +489,15 @@ func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb OSFeatures: platform.OSFeatures, } } - resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{Ref: ref, Platform: p, ResolveMode: opt.ResolveMode, LogName: opt.LogName}) + resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{ + ResolverType: int32(opt.ResolverType), + Ref: ref, + Platform: p, + ResolveMode: opt.ResolveMode, + LogName: opt.LogName, + SessionID: opt.Store.SessionID, + StoreID: opt.Store.StoreID, + }) if err != nil { return "", nil, err } @@ -806,6 +831,7 @@ func (ctr *container) Start(ctx context.Context, req client.StartRequest) (clien Tty: req.Tty, Security: req.SecurityMode, } + init.Meta.RemoveMountStubsRecursive = req.RemoveMountStubsRecursive if req.Stdin != nil { init.Fds = append(init.Fds, 0) } @@ -1036,6 +1062,15 @@ func (r *reference) ToState() (st llb.State, err error) { return llb.NewState(defop), nil } +func (r *reference) Evaluate(ctx context.Context) error { + req := &pb.EvaluateRequest{Ref: r.id} + _, err := r.c.client.Evaluate(ctx, req) + if err != nil { + return err + } + return nil +} + func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id} if r := req.Range; r != nil { diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go index c4af39f3f0..deb192dc11 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go @@ -56,8 +56,14 @@ const ( // errors. 
CapGatewayEvaluateSolve apicaps.CapID = "gateway.solve.evaluate" + CapGatewayEvaluate apicaps.CapID = "gateway.evaluate" + // CapGatewayWarnings is the capability to log warnings from frontend CapGatewayWarnings apicaps.CapID = "gateway.warnings" + + // CapAttestations is the capability to indicate that attestation + // references will be attached to results + CapAttestations apicaps.CapID = "reference.attestations" ) func init() { @@ -194,10 +200,24 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapGatewayEvaluate, + Name: "gateway evaluate", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapGatewayWarnings, Name: "logging warnings", Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapAttestations, + Name: "reference attestations", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go index e8e797ca7e..da36afdd14 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go @@ -11,7 +11,8 @@ import ( proto "github.com/gogo/protobuf/proto" types1 "github.com/moby/buildkit/api/types" pb "github.com/moby/buildkit/solver/pb" - pb1 "github.com/moby/buildkit/util/apicaps/pb" + pb1 "github.com/moby/buildkit/sourcepolicy/pb" + pb2 "github.com/moby/buildkit/util/apicaps/pb" github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" types "github.com/tonistiigi/fsutil/types" grpc "google.golang.org/grpc" @@ -33,17 +34,70 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +type AttestationKind int32 + +const ( + AttestationKindInToto AttestationKind = 0 + AttestationKindBundle AttestationKind = 1 +) + +var AttestationKind_name = map[int32]string{ + 0: "InToto", + 1: "Bundle", +} + +var AttestationKind_value = map[string]int32{ + "InToto": 0, + "Bundle": 1, +} + +func (x AttestationKind) String() string { + return proto.EnumName(AttestationKind_name, int32(x)) +} + +func (AttestationKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{0} +} + +type InTotoSubjectKind int32 + +const ( + InTotoSubjectKindSelf InTotoSubjectKind = 0 + InTotoSubjectKindRaw InTotoSubjectKind = 1 +) + +var InTotoSubjectKind_name = map[int32]string{ + 0: "Self", + 1: "Raw", +} + +var InTotoSubjectKind_value = map[string]int32{ + "Self": 0, + "Raw": 1, +} + +func (x InTotoSubjectKind) String() string { + return proto.EnumName(InTotoSubjectKind_name, int32(x)) +} + +func (InTotoSubjectKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{1} +} + type Result struct { // Types that are valid to be assigned to Result: + // // *Result_RefDeprecated // *Result_RefsDeprecated // *Result_Ref // *Result_Refs - Result isResult_Result `protobuf_oneof:"result"` - Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Result isResult_Result `protobuf_oneof:"result"` + Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // 11 was used during development and is reserved for old attestation format + Attestations map[string]*Attestations `protobuf:"bytes,12,rep,name=attestations,proto3" json:"attestations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Result) Reset() { *m = Result{} } @@ -145,6 +199,13 @@ func (m *Result) GetMetadata() map[string][]byte { return nil } +func (m *Result) GetAttestations() map[string]*Attestations { + if m != nil { + return m.Attestations + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Result) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -304,6 +365,196 @@ func (m *RefMap) GetRefs() map[string]*Ref { return nil } +type Attestations struct { + Attestation []*Attestation `protobuf:"bytes,1,rep,name=attestation,proto3" json:"attestation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attestations) Reset() { *m = Attestations{} } +func (m *Attestations) String() string { return proto.CompactTextString(m) } +func (*Attestations) ProtoMessage() {} +func (*Attestations) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{4} +} +func (m *Attestations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Attestations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Attestations.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Attestations) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attestations.Merge(m, src) +} +func (m *Attestations) XXX_Size() int { + return m.Size() +} +func (m *Attestations) XXX_DiscardUnknown() { + xxx_messageInfo_Attestations.DiscardUnknown(m) +} + +var xxx_messageInfo_Attestations proto.InternalMessageInfo + +func (m *Attestations) GetAttestation() []*Attestation { + if m != nil { + return m.Attestation + } + return nil +} + +type Attestation struct { + Kind AttestationKind `protobuf:"varint,1,opt,name=kind,proto3,enum=moby.buildkit.v1.frontend.AttestationKind" json:"kind,omitempty"` + Metadata map[string][]byte `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Ref *Ref `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` + InTotoPredicateType string `protobuf:"bytes,5,opt,name=inTotoPredicateType,proto3" json:"inTotoPredicateType,omitempty"` + InTotoSubjects []*InTotoSubject `protobuf:"bytes,6,rep,name=inTotoSubjects,proto3" json:"inTotoSubjects,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Attestation) Reset() { *m = Attestation{} } +func (m *Attestation) String() string { return proto.CompactTextString(m) } +func (*Attestation) ProtoMessage() {} +func (*Attestation) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{5} +} +func (m *Attestation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Attestation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Attestation.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Attestation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attestation.Merge(m, src) +} +func (m *Attestation) XXX_Size() int { + return m.Size() +} +func (m *Attestation) XXX_DiscardUnknown() { + xxx_messageInfo_Attestation.DiscardUnknown(m) +} + +var xxx_messageInfo_Attestation proto.InternalMessageInfo + +func (m *Attestation) GetKind() AttestationKind { + if m != nil { + return m.Kind + } + return AttestationKindInToto +} + +func (m *Attestation) GetMetadata() map[string][]byte { + if m 
!= nil { + return m.Metadata + } + return nil +} + +func (m *Attestation) GetRef() *Ref { + if m != nil { + return m.Ref + } + return nil +} + +func (m *Attestation) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *Attestation) GetInTotoPredicateType() string { + if m != nil { + return m.InTotoPredicateType + } + return "" +} + +func (m *Attestation) GetInTotoSubjects() []*InTotoSubject { + if m != nil { + return m.InTotoSubjects + } + return nil +} + +type InTotoSubject struct { + Kind InTotoSubjectKind `protobuf:"varint,1,opt,name=kind,proto3,enum=moby.buildkit.v1.frontend.InTotoSubjectKind" json:"kind,omitempty"` + Digest []github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,rep,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InTotoSubject) Reset() { *m = InTotoSubject{} } +func (m *InTotoSubject) String() string { return proto.CompactTextString(m) } +func (*InTotoSubject) ProtoMessage() {} +func (*InTotoSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{6} +} +func (m *InTotoSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InTotoSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InTotoSubject.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *InTotoSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_InTotoSubject.Merge(m, src) +} +func (m *InTotoSubject) XXX_Size() int { + return m.Size() +} +func (m *InTotoSubject) XXX_DiscardUnknown() { + xxx_messageInfo_InTotoSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_InTotoSubject proto.InternalMessageInfo + +func (m *InTotoSubject) GetKind() InTotoSubjectKind { + if m != nil { + return m.Kind + } + return InTotoSubjectKindSelf +} + +func (m *InTotoSubject) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type ReturnRequest struct { Result *Result `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` Error *rpc.Status `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` @@ -316,7 +567,7 @@ func (m *ReturnRequest) Reset() { *m = ReturnRequest{} } func (m *ReturnRequest) String() string { return proto.CompactTextString(m) } func (*ReturnRequest) ProtoMessage() {} func (*ReturnRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{4} + return fileDescriptor_f1a937782ebbded5, []int{7} } func (m *ReturnRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -369,7 +620,7 @@ func (m *ReturnResponse) Reset() { *m = ReturnResponse{} } func (m *ReturnResponse) String() string { return proto.CompactTextString(m) } func (*ReturnResponse) ProtoMessage() {} func (*ReturnResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{5} + return fileDescriptor_f1a937782ebbded5, []int{8} } func (m *ReturnResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -408,7 +659,7 @@ func (m *InputsRequest) Reset() { *m = InputsRequest{} } func (m *InputsRequest) String() string { return proto.CompactTextString(m) } func (*InputsRequest) ProtoMessage() {} func (*InputsRequest) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_f1a937782ebbded5, []int{6} + return fileDescriptor_f1a937782ebbded5, []int{9} } func (m *InputsRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -448,7 +699,7 @@ func (m *InputsResponse) Reset() { *m = InputsResponse{} } func (m *InputsResponse) String() string { return proto.CompactTextString(m) } func (*InputsResponse) ProtoMessage() {} func (*InputsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{7} + return fileDescriptor_f1a937782ebbded5, []int{10} } func (m *InputsResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -489,6 +740,9 @@ type ResolveImageConfigRequest struct { Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` + ResolverType int32 `protobuf:"varint,5,opt,name=ResolverType,proto3" json:"ResolverType,omitempty"` + SessionID string `protobuf:"bytes,6,opt,name=SessionID,proto3" json:"SessionID,omitempty"` + StoreID string `protobuf:"bytes,7,opt,name=StoreID,proto3" json:"StoreID,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -498,7 +752,7 @@ func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigReq func (m *ResolveImageConfigRequest) String() string { return proto.CompactTextString(m) } func (*ResolveImageConfigRequest) ProtoMessage() {} func (*ResolveImageConfigRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{8} + return fileDescriptor_f1a937782ebbded5, []int{11} } func (m *ResolveImageConfigRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,6 +809,27 @@ func (m *ResolveImageConfigRequest) GetLogName() string { return "" } +func (m *ResolveImageConfigRequest) GetResolverType() int32 { + if m != nil { + return m.ResolverType + } + return 0 +} + +func (m *ResolveImageConfigRequest) GetSessionID() string { + if m != nil { + return m.SessionID + } + return "" +} + +func (m *ResolveImageConfigRequest) GetStoreID() string { + if m != nil { + return m.StoreID + } + return "" +} + type ResolveImageConfigResponse struct { Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=Digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"Digest"` Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` @@ -567,7 +842,7 @@ func (m *ResolveImageConfigResponse) Reset() { *m = ResolveImageConfigRe func (m *ResolveImageConfigResponse) String() string { return proto.CompactTextString(m) } func (*ResolveImageConfigResponse) ProtoMessage() {} func (*ResolveImageConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{9} + return fileDescriptor_f1a937782ebbded5, []int{12} } func (m *ResolveImageConfigResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -607,13 +882,9 @@ type SolveRequest struct { Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition,proto3" json:"Definition,omitempty"` Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` FrontendOpt map[string]string `protobuf:"bytes,3,rep,name=FrontendOpt,proto3" json:"FrontendOpt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // ImportCacheRefsDeprecated is 
deprecated in favor or the new Imports since BuildKit v0.4.0. - // When ImportCacheRefsDeprecated is set, the solver appends - // {.Type = "registry", .Attrs = {"ref": importCacheRef}} - // for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed) - ImportCacheRefsDeprecated []string `protobuf:"bytes,4,rep,name=ImportCacheRefsDeprecated,proto3" json:"ImportCacheRefsDeprecated,omitempty"` - AllowResultReturn bool `protobuf:"varint,5,opt,name=allowResultReturn,proto3" json:"allowResultReturn,omitempty"` - AllowResultArrayRef bool `protobuf:"varint,6,opt,name=allowResultArrayRef,proto3" json:"allowResultArrayRef,omitempty"` + // 4 was removed in BuildKit v0.11.0. + AllowResultReturn bool `protobuf:"varint,5,opt,name=allowResultReturn,proto3" json:"allowResultReturn,omitempty"` + AllowResultArrayRef bool `protobuf:"varint,6,opt,name=allowResultArrayRef,proto3" json:"allowResultArrayRef,omitempty"` // apicaps.CapSolveInlineReturn deprecated Final bool `protobuf:"varint,10,opt,name=Final,proto3" json:"Final,omitempty"` ExporterAttr []byte `protobuf:"bytes,11,opt,name=ExporterAttr,proto3" json:"ExporterAttr,omitempty"` @@ -623,6 +894,7 @@ type SolveRequest struct { // apicaps:CapFrontendInputs FrontendInputs map[string]*pb.Definition `protobuf:"bytes,13,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Evaluate bool `protobuf:"varint,14,opt,name=Evaluate,proto3" json:"Evaluate,omitempty"` + SourcePolicies []*pb1.Policy `protobuf:"bytes,15,rep,name=SourcePolicies,proto3" json:"SourcePolicies,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -632,7 +904,7 @@ func (m *SolveRequest) Reset() { *m = SolveRequest{} } func (m *SolveRequest) String() string { return proto.CompactTextString(m) } func (*SolveRequest) ProtoMessage() {} func (*SolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{10} + return fileDescriptor_f1a937782ebbded5, []int{13} } func (m *SolveRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -682,13 +954,6 @@ func (m *SolveRequest) GetFrontendOpt() map[string]string { return nil } -func (m *SolveRequest) GetImportCacheRefsDeprecated() []string { - if m != nil { - return m.ImportCacheRefsDeprecated - } - return nil -} - func (m *SolveRequest) GetAllowResultReturn() bool { if m != nil { return m.AllowResultReturn @@ -738,6 +1003,13 @@ func (m *SolveRequest) GetEvaluate() bool { return false } +func (m *SolveRequest) GetSourcePolicies() []*pb1.Policy { + if m != nil { + return m.SourcePolicies + } + return nil +} + // CacheOptionsEntry corresponds to the control.CacheOptionsEntry type CacheOptionsEntry struct { Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` @@ -751,7 +1023,7 @@ func (m *CacheOptionsEntry) Reset() { *m = CacheOptionsEntry{} } func (m *CacheOptionsEntry) String() string { return proto.CompactTextString(m) } func (*CacheOptionsEntry) ProtoMessage() {} func (*CacheOptionsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{11} + return fileDescriptor_f1a937782ebbded5, []int{14} } func (m *CacheOptionsEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -808,7 +1080,7 @@ func (m *SolveResponse) Reset() { *m = SolveResponse{} } func (m *SolveResponse) String() string { return proto.CompactTextString(m) } func (*SolveResponse) ProtoMessage() 
{} func (*SolveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{12} + return fileDescriptor_f1a937782ebbded5, []int{15} } func (m *SolveResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -864,7 +1136,7 @@ func (m *ReadFileRequest) Reset() { *m = ReadFileRequest{} } func (m *ReadFileRequest) String() string { return proto.CompactTextString(m) } func (*ReadFileRequest) ProtoMessage() {} func (*ReadFileRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{13} + return fileDescriptor_f1a937782ebbded5, []int{16} } func (m *ReadFileRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -926,7 +1198,7 @@ func (m *FileRange) Reset() { *m = FileRange{} } func (m *FileRange) String() string { return proto.CompactTextString(m) } func (*FileRange) ProtoMessage() {} func (*FileRange) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{14} + return fileDescriptor_f1a937782ebbded5, []int{17} } func (m *FileRange) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -980,7 +1252,7 @@ func (m *ReadFileResponse) Reset() { *m = ReadFileResponse{} } func (m *ReadFileResponse) String() string { return proto.CompactTextString(m) } func (*ReadFileResponse) ProtoMessage() {} func (*ReadFileResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{15} + return fileDescriptor_f1a937782ebbded5, []int{18} } func (m *ReadFileResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1029,7 +1301,7 @@ func (m *ReadDirRequest) Reset() { *m = ReadDirRequest{} } func (m *ReadDirRequest) String() string { return proto.CompactTextString(m) } func (*ReadDirRequest) ProtoMessage() {} func (*ReadDirRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{16} + return fileDescriptor_f1a937782ebbded5, []int{19} } func (m *ReadDirRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1090,7 +1362,7 @@ func (m *ReadDirResponse) Reset() { *m = ReadDirResponse{} } func (m *ReadDirResponse) String() string { return proto.CompactTextString(m) } func (*ReadDirResponse) ProtoMessage() {} func (*ReadDirResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{17} + return fileDescriptor_f1a937782ebbded5, []int{20} } func (m *ReadDirResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1138,7 +1410,7 @@ func (m *StatFileRequest) Reset() { *m = StatFileRequest{} } func (m *StatFileRequest) String() string { return proto.CompactTextString(m) } func (*StatFileRequest) ProtoMessage() {} func (*StatFileRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{18} + return fileDescriptor_f1a937782ebbded5, []int{21} } func (m *StatFileRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1192,7 +1464,7 @@ func (m *StatFileResponse) Reset() { *m = StatFileResponse{} } func (m *StatFileResponse) String() string { return proto.CompactTextString(m) } func (*StatFileResponse) ProtoMessage() {} func (*StatFileResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{19} + return fileDescriptor_f1a937782ebbded5, []int{22} } func (m *StatFileResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1228,6 +1500,92 @@ func (m *StatFileResponse) GetStat() *types.Stat { return nil } +type EvaluateRequest struct { + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EvaluateRequest) Reset() { *m = EvaluateRequest{} } +func (m *EvaluateRequest) String() string { return proto.CompactTextString(m) } +func (*EvaluateRequest) ProtoMessage() {} +func (*EvaluateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{23} +} +func (m *EvaluateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvaluateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvaluateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvaluateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvaluateRequest.Merge(m, src) +} +func (m *EvaluateRequest) XXX_Size() int { + return m.Size() +} +func (m *EvaluateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_EvaluateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_EvaluateRequest proto.InternalMessageInfo + +func (m *EvaluateRequest) GetRef() string { + if m != nil { + return m.Ref + } + return "" +} + +type EvaluateResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EvaluateResponse) Reset() { *m = EvaluateResponse{} } +func (m *EvaluateResponse) String() string { return proto.CompactTextString(m) } +func (*EvaluateResponse) ProtoMessage() {} +func (*EvaluateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{24} +} +func (m *EvaluateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EvaluateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EvaluateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EvaluateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvaluateResponse.Merge(m, src) +} +func (m *EvaluateResponse) XXX_Size() int { + return m.Size() +} +func (m *EvaluateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_EvaluateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_EvaluateResponse proto.InternalMessageInfo + type PingRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1238,7 +1596,7 @@ func (m *PingRequest) Reset() { *m = PingRequest{} } func (m *PingRequest) String() string { return proto.CompactTextString(m) } func (*PingRequest) ProtoMessage() {} func (*PingRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{20} + return fileDescriptor_f1a937782ebbded5, []int{25} } func (m *PingRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1268,8 +1626,8 @@ func (m *PingRequest) XXX_DiscardUnknown() { var xxx_messageInfo_PingRequest proto.InternalMessageInfo type PongResponse struct { - FrontendAPICaps []pb1.APICap `protobuf:"bytes,1,rep,name=FrontendAPICaps,proto3" json:"FrontendAPICaps"` - LLBCaps []pb1.APICap `protobuf:"bytes,2,rep,name=LLBCaps,proto3" json:"LLBCaps"` + FrontendAPICaps []pb2.APICap `protobuf:"bytes,1,rep,name=FrontendAPICaps,proto3" json:"FrontendAPICaps"` + LLBCaps []pb2.APICap `protobuf:"bytes,2,rep,name=LLBCaps,proto3" json:"LLBCaps"` Workers []*types1.WorkerRecord 
`protobuf:"bytes,3,rep,name=Workers,proto3" json:"Workers,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1280,7 +1638,7 @@ func (m *PongResponse) Reset() { *m = PongResponse{} } func (m *PongResponse) String() string { return proto.CompactTextString(m) } func (*PongResponse) ProtoMessage() {} func (*PongResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{21} + return fileDescriptor_f1a937782ebbded5, []int{26} } func (m *PongResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1309,14 +1667,14 @@ func (m *PongResponse) XXX_DiscardUnknown() { var xxx_messageInfo_PongResponse proto.InternalMessageInfo -func (m *PongResponse) GetFrontendAPICaps() []pb1.APICap { +func (m *PongResponse) GetFrontendAPICaps() []pb2.APICap { if m != nil { return m.FrontendAPICaps } return nil } -func (m *PongResponse) GetLLBCaps() []pb1.APICap { +func (m *PongResponse) GetLLBCaps() []pb2.APICap { if m != nil { return m.LLBCaps } @@ -1347,7 +1705,7 @@ func (m *WarnRequest) Reset() { *m = WarnRequest{} } func (m *WarnRequest) String() string { return proto.CompactTextString(m) } func (*WarnRequest) ProtoMessage() {} func (*WarnRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{22} + return fileDescriptor_f1a937782ebbded5, []int{27} } func (m *WarnRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1428,7 +1786,7 @@ func (m *WarnResponse) Reset() { *m = WarnResponse{} } func (m *WarnResponse) String() string { return proto.CompactTextString(m) } func (*WarnResponse) ProtoMessage() {} func (*WarnResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{23} + return fileDescriptor_f1a937782ebbded5, []int{28} } func (m *WarnResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1474,7 +1832,7 @@ func (m *NewContainerRequest) Reset() { *m = NewContainerRequest{} } func (m *NewContainerRequest) String() string { return proto.CompactTextString(m) } func (*NewContainerRequest) ProtoMessage() {} func (*NewContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{24} + return fileDescriptor_f1a937782ebbded5, []int{29} } func (m *NewContainerRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1555,7 +1913,7 @@ func (m *NewContainerResponse) Reset() { *m = NewContainerResponse{} } func (m *NewContainerResponse) String() string { return proto.CompactTextString(m) } func (*NewContainerResponse) ProtoMessage() {} func (*NewContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{25} + return fileDescriptor_f1a937782ebbded5, []int{30} } func (m *NewContainerResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1595,7 +1953,7 @@ func (m *ReleaseContainerRequest) Reset() { *m = ReleaseContainerRequest func (m *ReleaseContainerRequest) String() string { return proto.CompactTextString(m) } func (*ReleaseContainerRequest) ProtoMessage() {} func (*ReleaseContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{26} + return fileDescriptor_f1a937782ebbded5, []int{31} } func (m *ReleaseContainerRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1641,7 +1999,7 @@ func (m *ReleaseContainerResponse) Reset() { *m = ReleaseContainerRespon func (m *ReleaseContainerResponse) String() string { return proto.CompactTextString(m) } func (*ReleaseContainerResponse) ProtoMessage() {} func 
(*ReleaseContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{27} + return fileDescriptor_f1a937782ebbded5, []int{32} } func (m *ReleaseContainerResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1673,6 +2031,7 @@ var xxx_messageInfo_ReleaseContainerResponse proto.InternalMessageInfo type ExecMessage struct { ProcessID string `protobuf:"bytes,1,opt,name=ProcessID,proto3" json:"ProcessID,omitempty"` // Types that are valid to be assigned to Input: + // // *ExecMessage_Init // *ExecMessage_File // *ExecMessage_Resize @@ -1690,7 +2049,7 @@ func (m *ExecMessage) Reset() { *m = ExecMessage{} } func (m *ExecMessage) String() string { return proto.CompactTextString(m) } func (*ExecMessage) ProtoMessage() {} func (*ExecMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{28} + return fileDescriptor_f1a937782ebbded5, []int{33} } func (m *ExecMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1846,7 +2205,7 @@ func (m *InitMessage) Reset() { *m = InitMessage{} } func (m *InitMessage) String() string { return proto.CompactTextString(m) } func (*InitMessage) ProtoMessage() {} func (*InitMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{29} + return fileDescriptor_f1a937782ebbded5, []int{34} } func (m *InitMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1922,7 +2281,7 @@ func (m *ExitMessage) Reset() { *m = ExitMessage{} } func (m *ExitMessage) String() string { return proto.CompactTextString(m) } func (*ExitMessage) ProtoMessage() {} func (*ExitMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{30} + return fileDescriptor_f1a937782ebbded5, []int{35} } func (m *ExitMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1975,7 +2334,7 @@ func (m *StartedMessage) Reset() { *m = StartedMessage{} } func (m *StartedMessage) String() string { return proto.CompactTextString(m) } func (*StartedMessage) ProtoMessage() {} func (*StartedMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{31} + return fileDescriptor_f1a937782ebbded5, []int{36} } func (m *StartedMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2014,7 +2373,7 @@ func (m *DoneMessage) Reset() { *m = DoneMessage{} } func (m *DoneMessage) String() string { return proto.CompactTextString(m) } func (*DoneMessage) ProtoMessage() {} func (*DoneMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{32} + return fileDescriptor_f1a937782ebbded5, []int{37} } func (m *DoneMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2056,7 +2415,7 @@ func (m *FdMessage) Reset() { *m = FdMessage{} } func (m *FdMessage) String() string { return proto.CompactTextString(m) } func (*FdMessage) ProtoMessage() {} func (*FdMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{33} + return fileDescriptor_f1a937782ebbded5, []int{38} } func (m *FdMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2118,7 +2477,7 @@ func (m *ResizeMessage) Reset() { *m = ResizeMessage{} } func (m *ResizeMessage) String() string { return proto.CompactTextString(m) } func (*ResizeMessage) ProtoMessage() {} func (*ResizeMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{34} + return fileDescriptor_f1a937782ebbded5, []int{39} } func (m *ResizeMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) 
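From here on, the gateway.pb.go hunks are mostly mechanical: inserting the Attestations, Attestation, InTotoSubject, EvaluateRequest, and EvaluateResponse messages (plus the two new enums) renumbers every subsequent Descriptor() index, which is all the repeated index bumps amount to. The substantive addition is the Evaluate RPC below, which backs the new reference.Evaluate method added earlier in this diff. A hedged usage sketch from inside a frontend BuildFunc, assuming a gateway client c and a marshalled LLB definition def (names illustrative, not part of the patch):

	res, err := c.Solve(ctx, client.SolveRequest{
		Definition: def.ToPB(),
	})
	if err != nil {
		return nil, err
	}
	ref, err := res.SingleRef()
	if err != nil {
		return nil, err
	}
	// Issues the new Evaluate RPC (gated by CapGatewayEvaluate on the
	// daemon side) to force the build now instead of on first read.
	if err := ref.Evaluate(ctx); err != nil {
		return nil, err
	}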
@@ -2174,7 +2533,7 @@ func (m *SignalMessage) Reset() { *m = SignalMessage{} } func (m *SignalMessage) String() string { return proto.CompactTextString(m) } func (*SignalMessage) ProtoMessage() {} func (*SignalMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{35} + return fileDescriptor_f1a937782ebbded5, []int{40} } func (m *SignalMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2211,13 +2570,20 @@ func (m *SignalMessage) GetName() string { } func init() { + proto.RegisterEnum("moby.buildkit.v1.frontend.AttestationKind", AttestationKind_name, AttestationKind_value) + proto.RegisterEnum("moby.buildkit.v1.frontend.InTotoSubjectKind", InTotoSubjectKind_name, InTotoSubjectKind_value) proto.RegisterType((*Result)(nil), "moby.buildkit.v1.frontend.Result") + proto.RegisterMapType((map[string]*Attestations)(nil), "moby.buildkit.v1.frontend.Result.AttestationsEntry") proto.RegisterMapType((map[string][]byte)(nil), "moby.buildkit.v1.frontend.Result.MetadataEntry") proto.RegisterType((*RefMapDeprecated)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated") proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated.RefsEntry") proto.RegisterType((*Ref)(nil), "moby.buildkit.v1.frontend.Ref") proto.RegisterType((*RefMap)(nil), "moby.buildkit.v1.frontend.RefMap") proto.RegisterMapType((map[string]*Ref)(nil), "moby.buildkit.v1.frontend.RefMap.RefsEntry") + proto.RegisterType((*Attestations)(nil), "moby.buildkit.v1.frontend.Attestations") + proto.RegisterType((*Attestation)(nil), "moby.buildkit.v1.frontend.Attestation") + proto.RegisterMapType((map[string][]byte)(nil), "moby.buildkit.v1.frontend.Attestation.MetadataEntry") + proto.RegisterType((*InTotoSubject)(nil), "moby.buildkit.v1.frontend.InTotoSubject") proto.RegisterType((*ReturnRequest)(nil), "moby.buildkit.v1.frontend.ReturnRequest") proto.RegisterType((*ReturnResponse)(nil), "moby.buildkit.v1.frontend.ReturnResponse") proto.RegisterType((*InputsRequest)(nil), "moby.buildkit.v1.frontend.InputsRequest") @@ -2238,6 +2604,8 @@ func init() { proto.RegisterType((*ReadDirResponse)(nil), "moby.buildkit.v1.frontend.ReadDirResponse") proto.RegisterType((*StatFileRequest)(nil), "moby.buildkit.v1.frontend.StatFileRequest") proto.RegisterType((*StatFileResponse)(nil), "moby.buildkit.v1.frontend.StatFileResponse") + proto.RegisterType((*EvaluateRequest)(nil), "moby.buildkit.v1.frontend.EvaluateRequest") + proto.RegisterType((*EvaluateResponse)(nil), "moby.buildkit.v1.frontend.EvaluateResponse") proto.RegisterType((*PingRequest)(nil), "moby.buildkit.v1.frontend.PingRequest") proto.RegisterType((*PongResponse)(nil), "moby.buildkit.v1.frontend.PongResponse") proto.RegisterType((*WarnRequest)(nil), "moby.buildkit.v1.frontend.WarnRequest") @@ -2259,137 +2627,161 @@ func init() { func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } var fileDescriptor_f1a937782ebbded5 = []byte{ - // 2078 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x8a, 0x94, 0x48, 0x3e, 0xfe, 0xb1, 0x32, 0x4e, 0x53, 0x7a, 0x11, 0x38, 0xca, 0x36, - 0x55, 0x69, 0x47, 0x59, 0xa6, 0x72, 0x02, 0xb9, 0x72, 0x90, 0xd4, 0xfa, 0x07, 0x29, 0x91, 0x64, - 0x75, 0x94, 0xc2, 0x40, 0x90, 0x02, 0x5d, 0x71, 0x87, 0xf4, 0xc2, 0xab, 0x9d, 0xed, 0xec, 0xd0, - 0xb2, 0x92, 0x4b, 0x7b, 0xeb, 0xb1, 0x40, 0x81, 0x5e, 0x0b, 0xf4, 0x13, 0xf4, 0x13, 0xf4, 0x9c, - 0x63, 0x8f, 0x45, 0x0f, 0x41, 0xe1, 0xcf, 
0x50, 0x14, 0xe8, 0x2d, 0x78, 0x33, 0xb3, 0xe4, 0x92, - 0xa2, 0x96, 0x24, 0x7c, 0xe2, 0xcc, 0xdb, 0xf7, 0x7b, 0xf3, 0xfe, 0xcd, 0x7b, 0x6f, 0x08, 0xf5, - 0x9e, 0x27, 0xd9, 0xa5, 0x77, 0xe5, 0xc6, 0x82, 0x4b, 0x4e, 0xee, 0x5c, 0xf0, 0xf3, 0x2b, 0xf7, - 0xbc, 0x1f, 0x84, 0xfe, 0xf3, 0x40, 0xba, 0x2f, 0x7e, 0xee, 0x76, 0x05, 0x8f, 0x24, 0x8b, 0x7c, - 0xfb, 0x83, 0x5e, 0x20, 0x9f, 0xf5, 0xcf, 0xdd, 0x0e, 0xbf, 0x68, 0xf7, 0x78, 0x8f, 0xb7, 0x15, - 0xe2, 0xbc, 0xdf, 0x55, 0x3b, 0xb5, 0x51, 0x2b, 0x2d, 0xc9, 0xde, 0x18, 0x67, 0xef, 0x71, 0xde, - 0x0b, 0x99, 0x17, 0x07, 0x89, 0x59, 0xb6, 0x45, 0xdc, 0x69, 0x27, 0xd2, 0x93, 0xfd, 0xc4, 0x60, - 0xd6, 0x33, 0x18, 0x54, 0xa4, 0x9d, 0x2a, 0xd2, 0x4e, 0x78, 0xf8, 0x82, 0x89, 0x76, 0x7c, 0xde, - 0xe6, 0x71, 0xca, 0xdd, 0xbe, 0x91, 0xdb, 0x8b, 0x83, 0xb6, 0xbc, 0x8a, 0x59, 0xd2, 0xbe, 0xe4, - 0xe2, 0x39, 0x13, 0x06, 0xf0, 0xe0, 0x46, 0x40, 0x5f, 0x06, 0x21, 0xa2, 0x3a, 0x5e, 0x9c, 0xe0, - 0x21, 0xf8, 0x6b, 0x40, 0x59, 0xb3, 0x25, 0x8f, 0x82, 0x44, 0x06, 0x41, 0x2f, 0x68, 0x77, 0x13, - 0x85, 0xd1, 0xa7, 0xa0, 0x11, 0x9a, 0xdd, 0xf9, 0x63, 0x01, 0x96, 0x29, 0x4b, 0xfa, 0xa1, 0x24, - 0x6b, 0x50, 0x17, 0xac, 0xbb, 0xcb, 0x62, 0xc1, 0x3a, 0x9e, 0x64, 0x7e, 0xd3, 0x5a, 0xb5, 0x5a, - 0x95, 0x83, 0x05, 0x3a, 0x4a, 0x26, 0xbf, 0x86, 0x86, 0x60, 0xdd, 0x24, 0xc3, 0xb8, 0xb8, 0x6a, - 0xb5, 0xaa, 0x1b, 0xef, 0xbb, 0x37, 0x06, 0xc3, 0xa5, 0xac, 0x7b, 0xec, 0xc5, 0x43, 0xc8, 0xc1, - 0x02, 0x1d, 0x13, 0x42, 0x36, 0xa0, 0x20, 0x58, 0xb7, 0x59, 0x50, 0xb2, 0xee, 0xe6, 0xcb, 0x3a, - 0x58, 0xa0, 0xc8, 0x4c, 0x36, 0xa1, 0x88, 0x52, 0x9a, 0x45, 0x05, 0x7a, 0x77, 0xaa, 0x02, 0x07, - 0x0b, 0x54, 0x01, 0xc8, 0x17, 0x50, 0xbe, 0x60, 0xd2, 0xf3, 0x3d, 0xe9, 0x35, 0x61, 0xb5, 0xd0, - 0xaa, 0x6e, 0xb4, 0x73, 0xc1, 0xe8, 0x20, 0xf7, 0xd8, 0x20, 0xf6, 0x22, 0x29, 0xae, 0xe8, 0x40, - 0x80, 0xfd, 0x08, 0xea, 0x23, 0x9f, 0xc8, 0x0a, 0x14, 0x9e, 0xb3, 0x2b, 0xed, 0x3f, 0x8a, 0x4b, - 0xf2, 0x26, 0x2c, 0xbd, 0xf0, 0xc2, 0x3e, 0x53, 0xae, 0xaa, 0x51, 0xbd, 0xd9, 0x5a, 0x7c, 0x68, - 0x6d, 0x97, 0x61, 0x59, 0x28, 0xf1, 0xce, 0x5f, 0x2c, 0x58, 0x19, 0xf7, 0x13, 0x39, 0x34, 0x16, - 0x5a, 0x4a, 0xc9, 0x8f, 0xe7, 0x70, 0x31, 0x12, 0x12, 0xad, 0xaa, 0x12, 0x61, 0x6f, 0x42, 0x65, - 0x40, 0x9a, 0xa6, 0x62, 0x25, 0xa3, 0xa2, 0xb3, 0x09, 0x05, 0xca, 0xba, 0xa4, 0x01, 0x8b, 0x81, - 0x49, 0x0a, 0xba, 0x18, 0xf8, 0x64, 0x15, 0x0a, 0x3e, 0xeb, 0x9a, 0xe0, 0x37, 0xdc, 0xf8, 0xdc, - 0xdd, 0x65, 0xdd, 0x20, 0x0a, 0x64, 0xc0, 0x23, 0x8a, 0x9f, 0x9c, 0xbf, 0x59, 0x98, 0x5c, 0xa8, - 0x16, 0xf9, 0x6c, 0xc4, 0x8e, 0xe9, 0xa9, 0x72, 0x4d, 0xfb, 0xa7, 0xf9, 0xda, 0x7f, 0x94, 0xd5, - 0x7e, 0x6a, 0xfe, 0x64, 0xad, 0x93, 0x50, 0xa7, 0x4c, 0xf6, 0x45, 0x44, 0xd9, 0xef, 0xfa, 0x2c, - 0x91, 0xe4, 0x17, 0x69, 0x44, 0x94, 0xfc, 0x69, 0x69, 0x85, 0x8c, 0xd4, 0x00, 0x48, 0x0b, 0x96, - 0x98, 0x10, 0x5c, 0x18, 0x2d, 0x88, 0xab, 0x2b, 0x87, 0x2b, 0xe2, 0x8e, 0x7b, 0xa6, 0x2a, 0x07, - 0xd5, 0x0c, 0xce, 0x0a, 0x34, 0xd2, 0x53, 0x93, 0x98, 0x47, 0x09, 0x73, 0x6e, 0x41, 0xfd, 0x30, - 0x8a, 0xfb, 0x32, 0x31, 0x7a, 0x38, 0xff, 0xb0, 0xa0, 0x91, 0x52, 0x34, 0x0f, 0xf9, 0x1a, 0xaa, - 0x43, 0x1f, 0xa7, 0xce, 0xdc, 0xca, 0xd1, 0x6f, 0x14, 0x9f, 0x09, 0x90, 0xf1, 0x6d, 0x56, 0x9c, - 0x7d, 0x02, 0x2b, 0xe3, 0x0c, 0x13, 0x3c, 0xfd, 0xde, 0xa8, 0xa7, 0xc7, 0x03, 0x9f, 0xf1, 0xec, - 0x9f, 0x2d, 0xb8, 0x43, 0x99, 0x2a, 0x85, 0x87, 0x17, 0x5e, 0x8f, 0xed, 0xf0, 0xa8, 0x1b, 0xf4, - 0x52, 0x37, 0xaf, 0xa8, 0xac, 0x4a, 0x25, 0x63, 0x82, 0xb5, 0xa0, 0x7c, 0x1a, 0x7a, 0xb2, 0xcb, - 0xc5, 0x85, 0x11, 0x5e, 0x43, 0xe1, 0x29, 0x8d, 0x0e, 0xbe, 0x92, 
0x55, 0xa8, 0x1a, 0xc1, 0xc7, - 0xdc, 0x67, 0xaa, 0x66, 0x54, 0x68, 0x96, 0x44, 0x9a, 0x50, 0x3a, 0xe2, 0xbd, 0x13, 0xef, 0x82, - 0xa9, 0xe2, 0x50, 0xa1, 0xe9, 0xd6, 0xf9, 0xbd, 0x05, 0xf6, 0x24, 0xad, 0x8c, 0x8b, 0x3f, 0x87, - 0xe5, 0xdd, 0xa0, 0xc7, 0x12, 0x1d, 0xfd, 0xca, 0xf6, 0xc6, 0x77, 0xdf, 0xbf, 0xb3, 0xf0, 0xef, - 0xef, 0xdf, 0xb9, 0x9f, 0xa9, 0xab, 0x3c, 0x66, 0x51, 0x87, 0x47, 0xd2, 0x0b, 0x22, 0x26, 0xb0, - 0x3d, 0x7c, 0xe0, 0x2b, 0x88, 0xab, 0x91, 0xd4, 0x48, 0x20, 0x6f, 0xc1, 0xb2, 0x96, 0x6e, 0xae, - 0xbd, 0xd9, 0x39, 0xff, 0x5d, 0x82, 0xda, 0x19, 0x2a, 0x90, 0xfa, 0xc2, 0x05, 0x18, 0xba, 0xd0, - 0xa4, 0xdd, 0xb8, 0x63, 0x33, 0x1c, 0xc4, 0x86, 0xf2, 0xbe, 0x09, 0xb1, 0xb9, 0xae, 0x83, 0x3d, - 0xf9, 0x0a, 0xaa, 0xe9, 0xfa, 0x49, 0x2c, 0x9b, 0x05, 0x95, 0x23, 0x0f, 0x73, 0x72, 0x24, 0xab, - 0x89, 0x9b, 0x81, 0x9a, 0x0c, 0xc9, 0x50, 0xc8, 0x27, 0x70, 0xe7, 0xf0, 0x22, 0xe6, 0x42, 0xee, - 0x78, 0x9d, 0x67, 0x8c, 0x8e, 0x76, 0x81, 0xe2, 0x6a, 0xa1, 0x55, 0xa1, 0x37, 0x33, 0x90, 0x75, - 0x78, 0xc3, 0x0b, 0x43, 0x7e, 0x69, 0x2e, 0x8d, 0x4a, 0xff, 0xe6, 0xd2, 0xaa, 0xd5, 0x2a, 0xd3, - 0xeb, 0x1f, 0xc8, 0x87, 0x70, 0x3b, 0x43, 0x7c, 0x2c, 0x84, 0x77, 0x85, 0xf9, 0xb2, 0xac, 0xf8, - 0x27, 0x7d, 0xc2, 0x0a, 0xb6, 0x1f, 0x44, 0x5e, 0xd8, 0x04, 0xc5, 0xa3, 0x37, 0xc4, 0x81, 0xda, - 0xde, 0x4b, 0x54, 0x89, 0x89, 0xc7, 0x52, 0x8a, 0x66, 0x55, 0x85, 0x62, 0x84, 0x46, 0x4e, 0xa1, - 0xa6, 0x14, 0xd6, 0xba, 0x27, 0xcd, 0x9a, 0x72, 0xda, 0x7a, 0x8e, 0xd3, 0x14, 0xfb, 0x93, 0x38, - 0x73, 0x95, 0x46, 0x24, 0x90, 0x0e, 0x34, 0x52, 0xc7, 0xe9, 0x3b, 0xd8, 0xac, 0x2b, 0x99, 0x8f, - 0xe6, 0x0d, 0x84, 0x46, 0xeb, 0x23, 0xc6, 0x44, 0x62, 0x1a, 0xec, 0xe1, 0x75, 0xf3, 0x24, 0x6b, - 0x36, 0x94, 0xcd, 0x83, 0xbd, 0xfd, 0x29, 0xac, 0x8c, 0xc7, 0x72, 0x9e, 0xa2, 0x6f, 0xff, 0x0a, - 0x6e, 0x4f, 0x50, 0xe1, 0xb5, 0xea, 0xc1, 0xdf, 0x2d, 0x78, 0xe3, 0x9a, 0xdf, 0x08, 0x81, 0xe2, - 0x97, 0x57, 0x31, 0x33, 0x22, 0xd5, 0x9a, 0x1c, 0xc3, 0x12, 0xc6, 0x25, 0x69, 0x2e, 0x2a, 0xa7, - 0x6d, 0xce, 0x13, 0x08, 0x57, 0x21, 0xb5, 0xc3, 0xb4, 0x14, 0xfb, 0x21, 0xc0, 0x90, 0x38, 0x57, - 0xeb, 0xfb, 0x1a, 0xea, 0x26, 0x2a, 0xa6, 0x3c, 0xac, 0xe8, 0x29, 0xc5, 0x80, 0x71, 0x06, 0x19, - 0xb6, 0x8b, 0xc2, 0x9c, 0xed, 0xc2, 0xf9, 0x16, 0x6e, 0x51, 0xe6, 0xf9, 0xfb, 0x41, 0xc8, 0x6e, - 0xae, 0x8a, 0x78, 0xd7, 0x83, 0x90, 0x9d, 0x7a, 0xf2, 0xd9, 0xe0, 0xae, 0x9b, 0x3d, 0xd9, 0x82, - 0x25, 0xea, 0x45, 0x3d, 0x66, 0x8e, 0x7e, 0x2f, 0xe7, 0x68, 0x75, 0x08, 0xf2, 0x52, 0x0d, 0x71, - 0x1e, 0x41, 0x65, 0x40, 0xc3, 0x4a, 0xf5, 0xa4, 0xdb, 0x4d, 0x98, 0xae, 0x7a, 0x05, 0x6a, 0x76, - 0x48, 0x3f, 0x62, 0x51, 0xcf, 0x1c, 0x5d, 0xa0, 0x66, 0xe7, 0xac, 0xe1, 0xa8, 0x92, 0x6a, 0x6e, - 0x5c, 0x43, 0xa0, 0xb8, 0x8b, 0xf3, 0x94, 0xa5, 0x2e, 0x98, 0x5a, 0x3b, 0x3e, 0xb6, 0x39, 0xcf, - 0xdf, 0x0d, 0xc4, 0xcd, 0x06, 0x36, 0xa1, 0xb4, 0x1b, 0x88, 0x8c, 0x7d, 0xe9, 0x96, 0xac, 0x61, - 0x03, 0xec, 0x84, 0x7d, 0x1f, 0xad, 0x95, 0x4c, 0x44, 0xa6, 0xd2, 0x8f, 0x51, 0x9d, 0xcf, 0xb4, - 0x1f, 0xd5, 0x29, 0x46, 0x99, 0x75, 0x28, 0xb1, 0x48, 0x8a, 0x80, 0xa5, 0x5d, 0x92, 0xb8, 0x7a, - 0x04, 0x76, 0xd5, 0x08, 0xac, 0xba, 0x31, 0x4d, 0x59, 0x9c, 0x4d, 0xb8, 0x85, 0x84, 0xfc, 0x40, - 0x10, 0x28, 0x66, 0x94, 0x54, 0x6b, 0x67, 0x0b, 0x56, 0x86, 0x40, 0x73, 0xf4, 0x1a, 0x14, 0x71, - 0xc0, 0x36, 0x65, 0x7c, 0xd2, 0xb9, 0xea, 0xbb, 0x53, 0x87, 0xea, 0x69, 0x10, 0xa5, 0xfd, 0xd0, - 0x79, 0x65, 0x41, 0xed, 0x94, 0x47, 0xc3, 0x4e, 0x74, 0x0a, 0xb7, 0xd2, 0x1b, 0xf8, 0xf8, 0xf4, - 0x70, 0xc7, 0x8b, 0x53, 0x53, 0x56, 0xaf, 0x87, 0xd9, 0xbc, 0x05, 0x5c, 0xcd, 0xb8, 0x5d, 
0xc4, - 0xa6, 0x45, 0xc7, 0xe1, 0xe4, 0x97, 0x50, 0x3a, 0x3a, 0xda, 0x56, 0x92, 0x16, 0xe7, 0x92, 0x94, - 0xc2, 0xc8, 0xa7, 0x50, 0x7a, 0xaa, 0x9e, 0x28, 0x89, 0x69, 0x2c, 0x13, 0x52, 0x4e, 0x1b, 0xaa, - 0xd9, 0x28, 0xeb, 0x70, 0xe1, 0xd3, 0x14, 0xe4, 0xfc, 0xcf, 0x82, 0xea, 0x53, 0x6f, 0x38, 0x6b, - 0x7d, 0x0e, 0xcb, 0xfe, 0x6b, 0x77, 0x5b, 0xbd, 0xc5, 0x5b, 0x1c, 0xb2, 0x17, 0x2c, 0x34, 0xa9, - 0xaa, 0x37, 0x48, 0x4d, 0x9e, 0x71, 0xa1, 0x6f, 0x67, 0x8d, 0xea, 0x0d, 0xe6, 0xb5, 0xcf, 0xa4, - 0x17, 0x84, 0xaa, 0x6b, 0xd5, 0xa8, 0xd9, 0x61, 0xd4, 0xfb, 0x22, 0x54, 0x4d, 0xa9, 0x42, 0x71, - 0x49, 0x1c, 0x28, 0x06, 0x51, 0x97, 0xab, 0xbe, 0x63, 0xaa, 0xdb, 0x19, 0xef, 0x8b, 0x0e, 0x3b, - 0x8c, 0xba, 0x9c, 0xaa, 0x6f, 0xe4, 0x5d, 0x58, 0x16, 0x78, 0x8d, 0x92, 0x66, 0x49, 0x39, 0xa5, - 0x82, 0x5c, 0xfa, 0xb2, 0x99, 0x0f, 0x4e, 0x03, 0x6a, 0xda, 0x6e, 0x33, 0xed, 0xfd, 0x69, 0x11, - 0x6e, 0x9f, 0xb0, 0xcb, 0x9d, 0xd4, 0xae, 0xd4, 0x21, 0xab, 0x50, 0x1d, 0xd0, 0x0e, 0x77, 0x4d, - 0xfa, 0x65, 0x49, 0x78, 0xd8, 0x31, 0xef, 0x47, 0x32, 0x8d, 0xa1, 0x3a, 0x4c, 0x51, 0xa8, 0xf9, - 0x40, 0x7e, 0x0a, 0xa5, 0x13, 0x26, 0xf1, 0x2d, 0xa9, 0xac, 0x6e, 0x6c, 0x54, 0x91, 0xe7, 0x84, - 0x49, 0x1c, 0x8d, 0x68, 0xfa, 0x0d, 0xe7, 0xad, 0x38, 0x9d, 0xb7, 0x8a, 0x93, 0xe6, 0xad, 0xf4, - 0x2b, 0xd9, 0x84, 0x6a, 0x87, 0x47, 0x89, 0x14, 0x5e, 0x80, 0x07, 0x2f, 0x29, 0xe6, 0x1f, 0x21, - 0xb3, 0x0e, 0xec, 0xce, 0xf0, 0x23, 0xcd, 0x72, 0x92, 0xfb, 0x00, 0xec, 0xa5, 0x14, 0xde, 0x01, - 0x4f, 0x64, 0xd2, 0x5c, 0x56, 0x0a, 0x03, 0xe2, 0x90, 0x70, 0x78, 0x4a, 0x33, 0x5f, 0x9d, 0xb7, - 0xe0, 0xcd, 0x51, 0x8f, 0x18, 0x57, 0x3d, 0x82, 0x1f, 0x53, 0x16, 0x32, 0x2f, 0x61, 0xf3, 0x7b, - 0xcb, 0xb1, 0xa1, 0x79, 0x1d, 0x6c, 0x04, 0xff, 0xbf, 0x00, 0xd5, 0xbd, 0x97, 0xac, 0x73, 0xcc, - 0x92, 0xc4, 0xeb, 0x31, 0xf2, 0x36, 0x54, 0x4e, 0x05, 0xef, 0xb0, 0x24, 0x19, 0xc8, 0x1a, 0x12, - 0xc8, 0x27, 0x50, 0x3c, 0x8c, 0x02, 0x69, 0xda, 0xdc, 0x5a, 0xee, 0xd0, 0x1d, 0x48, 0x23, 0x13, - 0x1f, 0x9c, 0xb8, 0x25, 0x5b, 0x50, 0xc4, 0x22, 0x31, 0x4b, 0xa1, 0xf6, 0x33, 0x58, 0xc4, 0x90, - 0x6d, 0xf5, 0x44, 0x0f, 0xbe, 0x61, 0x26, 0x4a, 0xad, 0xfc, 0x0e, 0x13, 0x7c, 0xc3, 0x86, 0x12, - 0x0c, 0x92, 0xec, 0x41, 0xe9, 0x4c, 0x7a, 0x02, 0xe7, 0x34, 0x1d, 0xbd, 0x7b, 0x79, 0x83, 0x88, - 0xe6, 0x1c, 0x4a, 0x49, 0xb1, 0xe8, 0x84, 0xbd, 0x97, 0x81, 0x34, 0xb7, 0x21, 0xcf, 0x09, 0xc8, - 0x96, 0x31, 0x04, 0xb7, 0x88, 0xde, 0xe5, 0x11, 0x6b, 0x96, 0xa6, 0xa2, 0x91, 0x2d, 0x83, 0xc6, - 0x2d, 0xba, 0xe1, 0x2c, 0xe8, 0xe1, 0x7c, 0x57, 0x9e, 0xea, 0x06, 0xcd, 0x98, 0x71, 0x83, 0x26, - 0x6c, 0x97, 0x60, 0x49, 0x4d, 0x33, 0xce, 0x5f, 0x2d, 0xa8, 0x66, 0xe2, 0x34, 0xc3, 0xbd, 0x7b, - 0x1b, 0x8a, 0xf8, 0xca, 0x37, 0xf1, 0x2f, 0xab, 0x5b, 0xc7, 0xa4, 0x47, 0x15, 0x15, 0x0b, 0xc7, - 0xbe, 0xaf, 0x8b, 0x62, 0x9d, 0xe2, 0x12, 0x29, 0x5f, 0xca, 0x2b, 0x15, 0xb2, 0x32, 0xc5, 0x25, - 0x59, 0x87, 0xf2, 0x19, 0xeb, 0xf4, 0x45, 0x20, 0xaf, 0x54, 0x10, 0x1a, 0x1b, 0x2b, 0xaa, 0x9c, - 0x18, 0x9a, 0xba, 0x9c, 0x03, 0x0e, 0xe7, 0x0b, 0x4c, 0xce, 0xa1, 0x82, 0x04, 0x8a, 0x3b, 0xf8, - 0xd6, 0x41, 0xcd, 0xea, 0x54, 0xad, 0xf1, 0xb9, 0xb9, 0x37, 0xed, 0xb9, 0xb9, 0x97, 0x3e, 0x37, - 0x47, 0x83, 0x8a, 0xdd, 0x27, 0xe3, 0x64, 0xe7, 0x31, 0x54, 0x06, 0x89, 0x87, 0x2f, 0xfd, 0x7d, - 0xdf, 0x9c, 0xb4, 0xb8, 0xef, 0xa3, 0x29, 0x7b, 0x4f, 0xf6, 0xd5, 0x29, 0x65, 0x8a, 0xcb, 0x41, - 0xaf, 0x2f, 0x64, 0x7a, 0xfd, 0x26, 0x3e, 0xa4, 0x33, 0xd9, 0x87, 0x4c, 0x94, 0x5f, 0x26, 0xa9, - 0xca, 0xb8, 0xd6, 0x66, 0x84, 0x89, 0x92, 0xa5, 0xcc, 0x08, 0x13, 0xe7, 0x27, 0x50, 0x1f, 0x89, - 0x17, 0x32, 0xa9, 
0x97, 0x9b, 0x19, 0x09, 0x71, 0xbd, 0xf1, 0xaf, 0x0a, 0x54, 0x8e, 0x8e, 0xb6, - 0xb7, 0x45, 0xe0, 0xf7, 0x18, 0xf9, 0x83, 0x05, 0xe4, 0xfa, 0x23, 0x8e, 0x7c, 0x94, 0x7f, 0x33, - 0x26, 0xbf, 0x44, 0xed, 0x8f, 0xe7, 0x44, 0x99, 0xfe, 0xfc, 0x15, 0x2c, 0xa9, 0xd9, 0x90, 0xfc, - 0x6c, 0xc6, 0x99, 0xde, 0x6e, 0x4d, 0x67, 0x34, 0xb2, 0x3b, 0x50, 0x4e, 0xe7, 0x2b, 0x72, 0x3f, - 0x57, 0xbd, 0x91, 0xf1, 0xd1, 0x7e, 0x7f, 0x26, 0x5e, 0x73, 0xc8, 0x6f, 0xa1, 0x64, 0xc6, 0x26, - 0x72, 0x6f, 0x0a, 0x6e, 0x38, 0xc0, 0xd9, 0xf7, 0x67, 0x61, 0x1d, 0x9a, 0x91, 0x8e, 0x47, 0xb9, - 0x66, 0x8c, 0x0d, 0x5f, 0xb9, 0x66, 0x5c, 0x9b, 0xb7, 0x9e, 0x42, 0x11, 0xe7, 0x28, 0x92, 0x57, - 0x4f, 0x32, 0x83, 0x96, 0x9d, 0x17, 0xae, 0x91, 0x01, 0xec, 0x37, 0x58, 0x77, 0xd5, 0x5b, 0x34, - 0xbf, 0xe2, 0x66, 0xfe, 0x3c, 0xb2, 0xef, 0xcd, 0xc0, 0x39, 0x14, 0x6f, 0xde, 0x71, 0xad, 0x19, - 0xfe, 0xc1, 0x99, 0x2e, 0x7e, 0xec, 0xbf, 0x22, 0x0e, 0xb5, 0x6c, 0x3b, 0x25, 0x6e, 0x0e, 0x74, - 0xc2, 0x24, 0x62, 0xb7, 0x67, 0xe6, 0x37, 0x07, 0x7e, 0x8b, 0x6f, 0x82, 0xd1, 0x56, 0x4b, 0x36, - 0x72, 0xdd, 0x31, 0xb1, 0xa9, 0xdb, 0x0f, 0xe6, 0xc2, 0x98, 0xc3, 0x3d, 0xdd, 0xca, 0x4d, 0xbb, - 0x26, 0xf9, 0x9d, 0x69, 0xd0, 0xf2, 0xed, 0x19, 0xf9, 0x5a, 0xd6, 0x87, 0x16, 0xe6, 0x19, 0x8e, - 0x70, 0xb9, 0xb2, 0x33, 0xb3, 0x6d, 0x6e, 0x9e, 0x65, 0x67, 0xc1, 0xed, 0xda, 0x77, 0xaf, 0xee, - 0x5a, 0xff, 0x7c, 0x75, 0xd7, 0xfa, 0xcf, 0xab, 0xbb, 0xd6, 0xf9, 0xb2, 0xfa, 0x63, 0xfe, 0xc1, - 0x0f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x92, 0x5d, 0x25, 0xb8, 0xea, 0x18, 0x00, 0x00, + // 2452 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x59, 0xcf, 0x6f, 0x1b, 0xc7, + 0xf5, 0xd7, 0x8a, 0x14, 0x25, 0x3d, 0x52, 0x14, 0x3d, 0x76, 0xf2, 0xa5, 0x17, 0x81, 0x23, 0xaf, + 0x63, 0x45, 0x56, 0x9c, 0xa5, 0xbf, 0xb2, 0x0d, 0xb9, 0x76, 0xeb, 0xc4, 0xfa, 0x05, 0x29, 0x96, + 0x6c, 0x76, 0xe4, 0xc2, 0x45, 0x90, 0x02, 0x5d, 0x71, 0x87, 0xf4, 0xd6, 0xab, 0xdd, 0xed, 0xee, + 0xd0, 0x32, 0x93, 0x4b, 0x7b, 0x28, 0x50, 0xe4, 0xd4, 0x53, 0x6f, 0x41, 0x81, 0x16, 0xe8, 0xb9, + 0xfd, 0x03, 0xda, 0x73, 0x80, 0x5e, 0x7a, 0xee, 0x21, 0x28, 0xfc, 0x0f, 0xf4, 0x56, 0xa0, 0xb7, + 0xe2, 0xcd, 0xcc, 0x92, 0xc3, 0x1f, 0x5a, 0x92, 0xf5, 0x89, 0x33, 0x6f, 0xde, 0x8f, 0x79, 0xef, + 0xcd, 0x7b, 0xf3, 0x99, 0x25, 0x2c, 0xb5, 0x1c, 0xce, 0xce, 0x9c, 0x8e, 0x1d, 0xc5, 0x21, 0x0f, + 0xc9, 0xe5, 0xd3, 0xf0, 0xa4, 0x63, 0x9f, 0xb4, 0x3d, 0xdf, 0x7d, 0xe9, 0x71, 0xfb, 0xd5, 0xff, + 0xdb, 0xcd, 0x38, 0x0c, 0x38, 0x0b, 0x5c, 0xf3, 0xe3, 0x96, 0xc7, 0x5f, 0xb4, 0x4f, 0xec, 0x46, + 0x78, 0x5a, 0x6b, 0x85, 0xad, 0xb0, 0x26, 0x24, 0x4e, 0xda, 0x4d, 0x31, 0x13, 0x13, 0x31, 0x92, + 0x9a, 0xcc, 0x8d, 0x41, 0xf6, 0x56, 0x18, 0xb6, 0x7c, 0xe6, 0x44, 0x5e, 0xa2, 0x86, 0xb5, 0x38, + 0x6a, 0xd4, 0x12, 0xee, 0xf0, 0x76, 0xa2, 0x64, 0x6e, 0x6a, 0x32, 0xb8, 0x91, 0x5a, 0xba, 0x91, + 0x5a, 0x12, 0xfa, 0xaf, 0x58, 0x5c, 0x8b, 0x4e, 0x6a, 0x61, 0x94, 0x72, 0xd7, 0xce, 0xe5, 0x76, + 0x22, 0xaf, 0xc6, 0x3b, 0x11, 0x4b, 0x6a, 0x67, 0x61, 0xfc, 0x92, 0xc5, 0x4a, 0xe0, 0xf6, 0xb9, + 0x02, 0x6d, 0xee, 0xf9, 0x28, 0xd5, 0x70, 0xa2, 0x04, 0x8d, 0xe0, 0xaf, 0x12, 0xd2, 0xdd, 0xe6, + 0x61, 0xe0, 0x25, 0xdc, 0xf3, 0x5a, 0x5e, 0xad, 0x99, 0x08, 0x19, 0x69, 0x05, 0x9d, 0x50, 0xec, + 0x77, 0x33, 0x5c, 0x68, 0xc7, 0x0d, 0x16, 0x85, 0xbe, 0xd7, 0xe8, 0xa0, 0x0d, 0x39, 0x92, 0x62, + 0xd6, 0xdf, 0xf2, 0x50, 0xa0, 0x2c, 0x69, 0xfb, 0x9c, 0xac, 0xc2, 0x52, 0xcc, 0x9a, 0x3b, 0x2c, + 0x8a, 0x59, 0xc3, 0xe1, 0xcc, 0xad, 0x1a, 0x2b, 0xc6, 0xda, 0xe2, 0xfe, 0x0c, 0xed, 0x27, 0x93, + 0x1f, 
0x41, 0x39, 0x66, 0xcd, 0x44, 0x63, 0x9c, 0x5d, 0x31, 0xd6, 0x8a, 0x1b, 0x1f, 0xd9, 0xe7, + 0xe6, 0xd0, 0xa6, 0xac, 0x79, 0xe4, 0x44, 0x3d, 0x91, 0xfd, 0x19, 0x3a, 0xa0, 0x84, 0x6c, 0x40, + 0x2e, 0x66, 0xcd, 0x6a, 0x4e, 0xe8, 0xba, 0x92, 0xad, 0x6b, 0x7f, 0x86, 0x22, 0x33, 0xd9, 0x84, + 0x3c, 0x6a, 0xa9, 0xe6, 0x85, 0xd0, 0xd5, 0xb1, 0x1b, 0xd8, 0x9f, 0xa1, 0x42, 0x80, 0x3c, 0x86, + 0x85, 0x53, 0xc6, 0x1d, 0xd7, 0xe1, 0x4e, 0x15, 0x56, 0x72, 0x6b, 0xc5, 0x8d, 0x5a, 0xa6, 0x30, + 0x06, 0xc8, 0x3e, 0x52, 0x12, 0xbb, 0x01, 0x8f, 0x3b, 0xb4, 0xab, 0x80, 0x3c, 0x87, 0x92, 0xc3, + 0x39, 0xc3, 0x64, 0x78, 0x61, 0x90, 0x54, 0x4b, 0x42, 0xe1, 0xed, 0xf1, 0x0a, 0x1f, 0x69, 0x52, + 0x52, 0x69, 0x9f, 0x22, 0xf3, 0x01, 0x2c, 0xf5, 0xd9, 0x24, 0x15, 0xc8, 0xbd, 0x64, 0x1d, 0x99, + 0x18, 0x8a, 0x43, 0x72, 0x09, 0xe6, 0x5e, 0x39, 0x7e, 0x9b, 0x89, 0x1c, 0x94, 0xa8, 0x9c, 0xdc, + 0x9f, 0xbd, 0x67, 0x98, 0x2f, 0xe0, 0xc2, 0x90, 0xfe, 0x11, 0x0a, 0x7e, 0xa0, 0x2b, 0x28, 0x6e, + 0x7c, 0x98, 0xb1, 0x6b, 0x5d, 0x9d, 0x66, 0x69, 0x6b, 0x01, 0x0a, 0xb1, 0x70, 0xc8, 0xfa, 0xad, + 0x01, 0x95, 0xc1, 0x54, 0x93, 0x03, 0x95, 0x24, 0x43, 0x84, 0xe5, 0xee, 0x14, 0xa7, 0x04, 0x09, + 0x2a, 0x30, 0x42, 0x85, 0xb9, 0x09, 0x8b, 0x5d, 0xd2, 0xb8, 0x60, 0x2c, 0x6a, 0x5b, 0xb4, 0x36, + 0x21, 0x47, 0x59, 0x93, 0x94, 0x61, 0xd6, 0x53, 0xe7, 0x9a, 0xce, 0x7a, 0x2e, 0x59, 0x81, 0x9c, + 0xcb, 0x9a, 0xca, 0xf5, 0xb2, 0x1d, 0x9d, 0xd8, 0x3b, 0xac, 0xe9, 0x05, 0x1e, 0xba, 0x48, 0x71, + 0xc9, 0xfa, 0xbd, 0x81, 0xf5, 0x81, 0xdb, 0x22, 0x9f, 0xf4, 0xf9, 0x31, 0xfe, 0xb4, 0x0f, 0xed, + 0xfe, 0x79, 0xf6, 0xee, 0xef, 0xf4, 0x67, 0x62, 0x4c, 0x09, 0xe8, 0xde, 0xfd, 0x18, 0x4a, 0x7a, + 0x6e, 0xc8, 0x3e, 0x14, 0xb5, 0x73, 0xa4, 0x36, 0xbc, 0x3a, 0x59, 0x66, 0xa9, 0x2e, 0x6a, 0xfd, + 0x31, 0x07, 0x45, 0x6d, 0x91, 0x3c, 0x84, 0xfc, 0x4b, 0x2f, 0x90, 0x21, 0x2c, 0x6f, 0xac, 0x4f, + 0xa6, 0xf2, 0xb1, 0x17, 0xb8, 0x54, 0xc8, 0x91, 0xba, 0x56, 0x77, 0xb3, 0x62, 0x5b, 0x77, 0x26, + 0xd3, 0x71, 0x6e, 0xf1, 0xdd, 0x9a, 0xa2, 0x6d, 0xc8, 0xa6, 0x41, 0x20, 0x1f, 0x39, 0xfc, 0x85, + 0x68, 0x1a, 0x8b, 0x54, 0x8c, 0xc9, 0x2d, 0xb8, 0xe8, 0x05, 0xcf, 0x42, 0x1e, 0xd6, 0x63, 0xe6, + 0x7a, 0x78, 0xf8, 0x9e, 0x75, 0x22, 0x56, 0x9d, 0x13, 0x2c, 0xa3, 0x96, 0x48, 0x1d, 0xca, 0x92, + 0x7c, 0xdc, 0x3e, 0xf9, 0x19, 0x6b, 0xf0, 0xa4, 0x5a, 0x10, 0xfe, 0xac, 0x65, 0x6c, 0xe1, 0x40, + 0x17, 0xa0, 0x03, 0xf2, 0x6f, 0x55, 0xed, 0xd6, 0x9f, 0x0d, 0x58, 0xea, 0x53, 0x4f, 0x3e, 0xed, + 0x4b, 0xd5, 0xcd, 0x49, 0xb7, 0xa5, 0x25, 0xeb, 0x33, 0x28, 0xb8, 0x5e, 0x8b, 0x25, 0x5c, 0xa4, + 0x6a, 0x71, 0x6b, 0xe3, 0xdb, 0xef, 0xde, 0x9f, 0xf9, 0xc7, 0x77, 0xef, 0xaf, 0x6b, 0x57, 0x4d, + 0x18, 0xb1, 0xa0, 0x11, 0x06, 0xdc, 0xf1, 0x02, 0x16, 0xe3, 0x05, 0xfb, 0xb1, 0x14, 0xb1, 0x77, + 0xc4, 0x0f, 0x55, 0x1a, 0x30, 0xe8, 0x81, 0x73, 0xca, 0x44, 0x9e, 0x16, 0xa9, 0x18, 0x5b, 0x1c, + 0x96, 0x28, 0xe3, 0xed, 0x38, 0xa0, 0xec, 0xe7, 0x6d, 0x64, 0xfa, 0x5e, 0xda, 0x48, 0xc4, 0xa6, + 0xc7, 0x35, 0x74, 0x64, 0xa4, 0x4a, 0x80, 0xac, 0xc1, 0x1c, 0x8b, 0xe3, 0x30, 0x56, 0xc5, 0x43, + 0x6c, 0x79, 0xd5, 0xdb, 0x71, 0xd4, 0xb0, 0x8f, 0xc5, 0x55, 0x4f, 0x25, 0x83, 0x55, 0x81, 0x72, + 0x6a, 0x35, 0x89, 0xc2, 0x20, 0x61, 0xd6, 0x32, 0x86, 0x2e, 0x6a, 0xf3, 0x44, 0xed, 0xc3, 0xfa, + 0xab, 0x01, 0xe5, 0x94, 0x22, 0x79, 0xc8, 0x17, 0x50, 0xec, 0xb5, 0x86, 0xb4, 0x07, 0xdc, 0xcf, + 0x0c, 0xaa, 0x2e, 0xaf, 0xf5, 0x15, 0xd5, 0x12, 0x74, 0x75, 0xe6, 0x13, 0xa8, 0x0c, 0x32, 0x8c, + 0xc8, 0xfe, 0x07, 0xfd, 0x0d, 0x62, 0xb0, 0x5f, 0x69, 0xa7, 0xe1, 0x5f, 0x06, 0x5c, 0xa6, 0x4c, + 0x60, 0x97, 0x83, 0x53, 0xa7, 
0xc5, 0xb6, 0xc3, 0xa0, 0xe9, 0xb5, 0xd2, 0x30, 0x57, 0x44, 0x33, + 0x4c, 0x35, 0x63, 0x5f, 0x5c, 0x83, 0x85, 0xba, 0xef, 0xf0, 0x66, 0x18, 0x9f, 0x2a, 0xe5, 0x25, + 0x54, 0x9e, 0xd2, 0x68, 0x77, 0x95, 0xac, 0x40, 0x51, 0x29, 0x3e, 0x0a, 0xdd, 0x34, 0x9d, 0x3a, + 0x89, 0x54, 0x61, 0xfe, 0x30, 0x6c, 0x3d, 0xc1, 0x64, 0xcb, 0x0a, 0x4b, 0xa7, 0xc4, 0x82, 0x92, + 0x62, 0x8c, 0xbb, 0xd5, 0x35, 0x47, 0xfb, 0x68, 0xe4, 0x3d, 0x58, 0x3c, 0x66, 0x49, 0xe2, 0x85, + 0xc1, 0xc1, 0x4e, 0xb5, 0x20, 0xe4, 0x7b, 0x04, 0xd4, 0x7d, 0xcc, 0xc3, 0x98, 0x1d, 0xec, 0x54, + 0xe7, 0xa5, 0x6e, 0x35, 0xb5, 0x7e, 0x61, 0x80, 0x39, 0xca, 0x63, 0x95, 0xbe, 0xcf, 0xa0, 0x20, + 0x0f, 0xa4, 0xf4, 0xfa, 0x7f, 0x3b, 0xca, 0xf2, 0x97, 0xbc, 0x0b, 0x05, 0xa9, 0x5d, 0x55, 0xa1, + 0x9a, 0x59, 0xbf, 0x2a, 0x40, 0xe9, 0x18, 0x37, 0x90, 0xc6, 0xd9, 0x06, 0xe8, 0xa5, 0x47, 0x1d, + 0xe9, 0xc1, 0xa4, 0x69, 0x1c, 0xc4, 0x84, 0x85, 0x3d, 0x75, 0x7c, 0xd4, 0x0d, 0xd6, 0x9d, 0x93, + 0xcf, 0xa1, 0x98, 0x8e, 0x9f, 0x46, 0xbc, 0x9a, 0x13, 0xe7, 0xef, 0x5e, 0xc6, 0xf9, 0xd3, 0x77, + 0x62, 0x6b, 0xa2, 0xea, 0xf4, 0x69, 0x14, 0x72, 0x13, 0x2e, 0x38, 0xbe, 0x1f, 0x9e, 0xa9, 0x92, + 0x12, 0xc5, 0x21, 0x92, 0xb3, 0x40, 0x87, 0x17, 0xb0, 0x55, 0x6a, 0xc4, 0x47, 0x71, 0xec, 0x74, + 0xf0, 0x34, 0x15, 0x04, 0xff, 0xa8, 0x25, 0xec, 0x5a, 0x7b, 0x5e, 0xe0, 0xf8, 0x55, 0x10, 0x3c, + 0x72, 0x82, 0xa7, 0x61, 0xf7, 0x75, 0x14, 0xc6, 0x9c, 0xc5, 0x8f, 0x38, 0x8f, 0xab, 0x45, 0x11, + 0xcc, 0x3e, 0x1a, 0xa9, 0x43, 0x69, 0xdb, 0x69, 0xbc, 0x60, 0x07, 0xa7, 0x48, 0x4c, 0x91, 0x55, + 0x56, 0x2f, 0x13, 0xec, 0x4f, 0x23, 0x1d, 0x52, 0xe9, 0x1a, 0x48, 0x03, 0xca, 0xa9, 0xeb, 0xb2, + 0x42, 0xab, 0x4b, 0x42, 0xe7, 0x83, 0x69, 0x43, 0x29, 0xa5, 0xa5, 0x89, 0x01, 0x95, 0x98, 0xc8, + 0x5d, 0x2c, 0x46, 0x87, 0xb3, 0x6a, 0x59, 0xf8, 0xdc, 0x9d, 0x93, 0x23, 0x28, 0x1f, 0x0b, 0x40, + 0x5e, 0x47, 0x18, 0xee, 0xb1, 0xa4, 0xba, 0x2c, 0x36, 0x70, 0x7d, 0x78, 0x03, 0x3a, 0x70, 0xb7, + 0x05, 0x7b, 0x87, 0x0e, 0x08, 0x9b, 0x0f, 0xa1, 0x32, 0x98, 0xdc, 0x69, 0x80, 0x91, 0xf9, 0x43, + 0xb8, 0x38, 0xc2, 0xa3, 0xb7, 0x6a, 0x3e, 0x7f, 0x32, 0xe0, 0xc2, 0x50, 0x1a, 0xf0, 0x02, 0x10, + 0x45, 0x2f, 0x55, 0x8a, 0x31, 0x39, 0x82, 0x39, 0x4c, 0x73, 0xa2, 0xa0, 0xc0, 0xe6, 0x34, 0x79, + 0xb5, 0x85, 0xa4, 0x8c, 0xbf, 0xd4, 0x62, 0xde, 0x03, 0xe8, 0x11, 0xa7, 0x82, 0x87, 0x5f, 0xc0, + 0x92, 0x4a, 0xb2, 0xea, 0x17, 0x15, 0x89, 0x2a, 0x94, 0x30, 0xa2, 0x86, 0xde, 0xdd, 0x94, 0x9b, + 0xf2, 0x6e, 0xb2, 0xbe, 0x82, 0x65, 0xca, 0x1c, 0x77, 0xcf, 0xf3, 0xd9, 0xf9, 0x2d, 0x18, 0x8b, + 0xdf, 0xf3, 0x59, 0x1d, 0x91, 0x49, 0x5a, 0xfc, 0x6a, 0x4e, 0xee, 0xc3, 0x1c, 0x75, 0x82, 0x16, + 0x53, 0xa6, 0x3f, 0xc8, 0x30, 0x2d, 0x8c, 0x20, 0x2f, 0x95, 0x22, 0xd6, 0x03, 0x58, 0xec, 0xd2, + 0xb0, 0x75, 0x3d, 0x6d, 0x36, 0x13, 0x26, 0xdb, 0x60, 0x8e, 0xaa, 0x19, 0xd2, 0x0f, 0x59, 0xd0, + 0x52, 0xa6, 0x73, 0x54, 0xcd, 0xac, 0x55, 0x84, 0xf3, 0xe9, 0xce, 0x55, 0x68, 0x08, 0xe4, 0x77, + 0x10, 0xbe, 0x19, 0xa2, 0x5e, 0xc5, 0xd8, 0x72, 0xf1, 0x4e, 0x75, 0xdc, 0x1d, 0x2f, 0x3e, 0xdf, + 0xc1, 0x2a, 0xcc, 0xef, 0x78, 0xb1, 0xe6, 0x5f, 0x3a, 0x25, 0xab, 0x78, 0xdb, 0x36, 0xfc, 0xb6, + 0x8b, 0xde, 0x72, 0x16, 0x07, 0xea, 0x5a, 0x19, 0xa0, 0x5a, 0x9f, 0xc8, 0x38, 0x0a, 0x2b, 0x6a, + 0x33, 0x37, 0x61, 0x9e, 0x05, 0x3c, 0xc6, 0x32, 0x92, 0x57, 0x32, 0xb1, 0xe5, 0x03, 0xd9, 0x16, + 0x0f, 0x64, 0x71, 0xf5, 0xd3, 0x94, 0xc5, 0xda, 0x84, 0x65, 0x24, 0x64, 0x27, 0x82, 0x40, 0x5e, + 0xdb, 0xa4, 0x18, 0x5b, 0xf7, 0xa1, 0xd2, 0x13, 0x54, 0xa6, 0x57, 0x21, 0x8f, 0xd8, 0x54, 0xf5, + 0xf5, 0x51, 0x76, 0xc5, 0xba, 0x75, 0x0d, 0x96, 0xd3, 
0xe2, 0x3f, 0xd7, 0xa8, 0x45, 0xa0, 0xd2, + 0x63, 0x52, 0xb0, 0x64, 0x09, 0x8a, 0x75, 0x2f, 0x48, 0x6f, 0x6d, 0xeb, 0x8d, 0x01, 0xa5, 0x7a, + 0x18, 0xf4, 0xee, 0xb4, 0x3a, 0x2c, 0xa7, 0xa5, 0xfb, 0xa8, 0x7e, 0xb0, 0xed, 0x44, 0x69, 0x0c, + 0x56, 0x86, 0xcf, 0x87, 0xfa, 0xc4, 0x60, 0x4b, 0xc6, 0xad, 0x3c, 0x5e, 0x7f, 0x74, 0x50, 0x9c, + 0x7c, 0x0a, 0xf3, 0x87, 0x87, 0x5b, 0x42, 0xd3, 0xec, 0x54, 0x9a, 0x52, 0x31, 0xf2, 0x10, 0xe6, + 0x9f, 0x8b, 0x2f, 0x1f, 0x89, 0xba, 0xa2, 0x46, 0x9c, 0x55, 0x19, 0x21, 0xc9, 0x46, 0x59, 0x23, + 0x8c, 0x5d, 0x9a, 0x0a, 0x59, 0xff, 0x36, 0xa0, 0xf8, 0xdc, 0xe9, 0x21, 0xc2, 0x1e, 0x04, 0x7d, + 0x8b, 0x7b, 0x5b, 0x41, 0xd0, 0x4b, 0x30, 0xe7, 0xb3, 0x57, 0xcc, 0x57, 0x67, 0x5c, 0x4e, 0x90, + 0x9a, 0xbc, 0x08, 0x63, 0x59, 0xd6, 0x25, 0x2a, 0x27, 0x58, 0x10, 0x2e, 0xe3, 0x8e, 0xe7, 0x57, + 0xf3, 0x2b, 0x39, 0xbc, 0xe3, 0xe5, 0x0c, 0x33, 0xd7, 0x8e, 0x7d, 0xf5, 0x2e, 0xc0, 0x21, 0xb1, + 0x20, 0xef, 0x05, 0xcd, 0x50, 0xdc, 0x7f, 0xaa, 0x2d, 0xca, 0x16, 0x7d, 0x10, 0x34, 0x43, 0x2a, + 0xd6, 0xc8, 0x55, 0x28, 0xc4, 0x58, 0x7f, 0x49, 0x75, 0x5e, 0x04, 0x65, 0x11, 0xb9, 0x64, 0x95, + 0xaa, 0x05, 0xab, 0x0c, 0x25, 0xe9, 0xb7, 0x4a, 0xfe, 0x6f, 0x66, 0xe1, 0xe2, 0x13, 0x76, 0xb6, + 0x9d, 0xfa, 0x95, 0x06, 0x64, 0x05, 0x8a, 0x5d, 0xda, 0xc1, 0x8e, 0x3a, 0x42, 0x3a, 0x09, 0x8d, + 0x1d, 0x85, 0xed, 0x80, 0xa7, 0x39, 0x14, 0xc6, 0x04, 0x85, 0xaa, 0x05, 0x72, 0x1d, 0xe6, 0x9f, + 0x30, 0x7e, 0x16, 0xc6, 0x2f, 0x85, 0xd7, 0xe5, 0x8d, 0x22, 0xf2, 0x3c, 0x61, 0x1c, 0x01, 0x1c, + 0x4d, 0xd7, 0x10, 0x15, 0x46, 0x29, 0x2a, 0xcc, 0x8f, 0x42, 0x85, 0xe9, 0x2a, 0xd9, 0x84, 0x62, + 0x23, 0x0c, 0x12, 0x1e, 0x3b, 0x1e, 0x1a, 0x9e, 0x13, 0xcc, 0xef, 0x20, 0xb3, 0x4c, 0xec, 0x76, + 0x6f, 0x91, 0xea, 0x9c, 0x64, 0x1d, 0x80, 0xbd, 0xe6, 0xb1, 0xb3, 0x1f, 0x26, 0xdd, 0x17, 0x14, + 0xa0, 0x1c, 0x12, 0x0e, 0xea, 0x54, 0x5b, 0xb5, 0xde, 0x85, 0x4b, 0xfd, 0x11, 0x51, 0xa1, 0x7a, + 0x00, 0xff, 0x47, 0x99, 0xcf, 0x9c, 0x84, 0x4d, 0x1f, 0x2d, 0xcb, 0x84, 0xea, 0xb0, 0xb0, 0x52, + 0xfc, 0x9f, 0x1c, 0x14, 0x77, 0x5f, 0xb3, 0xc6, 0x11, 0x4b, 0x12, 0xa7, 0x25, 0xb0, 0x69, 0x3d, + 0x0e, 0x1b, 0x2c, 0x49, 0xba, 0xba, 0x7a, 0x04, 0xf2, 0x7d, 0xc8, 0x1f, 0x04, 0x1e, 0x57, 0xf7, + 0xe3, 0x6a, 0xe6, 0xd3, 0xc0, 0xe3, 0x4a, 0xe7, 0xfe, 0x0c, 0x15, 0x52, 0xe4, 0x3e, 0xe4, 0xb1, + 0xbb, 0x4c, 0xd2, 0xe1, 0x5d, 0x4d, 0x16, 0x65, 0xc8, 0x96, 0xf8, 0x84, 0xe7, 0x7d, 0xc9, 0x54, + 0x96, 0xd6, 0xb2, 0xaf, 0x26, 0xef, 0x4b, 0xd6, 0xd3, 0xa0, 0x24, 0xc9, 0x2e, 0x22, 0x6b, 0x27, + 0xe6, 0xcc, 0x55, 0xd9, 0xbb, 0x91, 0x05, 0x88, 0x24, 0x67, 0x4f, 0x4b, 0x2a, 0x8b, 0x41, 0xd8, + 0x7d, 0xed, 0x71, 0x55, 0x0d, 0x59, 0x41, 0x40, 0x36, 0xcd, 0x11, 0x9c, 0xa2, 0xf4, 0x4e, 0x18, + 0x30, 0x81, 0xed, 0xb3, 0xa5, 0x91, 0x4d, 0x93, 0xc6, 0x29, 0x86, 0xe1, 0xd8, 0x6b, 0x21, 0xce, + 0x5c, 0x18, 0x1b, 0x06, 0xc9, 0xa8, 0x85, 0x41, 0x12, 0xb6, 0xe6, 0x61, 0x4e, 0xc0, 0x20, 0xeb, + 0x77, 0x06, 0x14, 0xb5, 0x3c, 0x4d, 0x50, 0x77, 0xef, 0x41, 0x1e, 0x9f, 0xef, 0x2a, 0xff, 0x0b, + 0xa2, 0xea, 0x18, 0x77, 0xa8, 0xa0, 0x62, 0xe3, 0xd8, 0x73, 0x65, 0x53, 0x5c, 0xa2, 0x38, 0x44, + 0xca, 0x33, 0xde, 0x11, 0x29, 0x5b, 0xa0, 0x38, 0x24, 0x37, 0x61, 0xe1, 0x98, 0x35, 0xda, 0xb1, + 0xc7, 0x3b, 0x22, 0x09, 0xe5, 0x8d, 0x8a, 0x68, 0x27, 0x8a, 0x26, 0x8a, 0xb3, 0xcb, 0x61, 0x3d, + 0xc6, 0xc3, 0xd9, 0xdb, 0x20, 0x81, 0xfc, 0x36, 0xbe, 0xc8, 0x70, 0x67, 0x4b, 0x54, 0x8c, 0xf1, + 0x51, 0xbc, 0x3b, 0xee, 0x51, 0xbc, 0x9b, 0x3e, 0x8a, 0xfb, 0x93, 0x8a, 0xb7, 0x8f, 0x16, 0x64, + 0xeb, 0x11, 0x2c, 0x76, 0x0f, 0x1e, 0x29, 0xc3, 0xec, 0x9e, 0xab, 0x2c, 0xcd, 
0xee, 0xb9, 0xe8, + 0xca, 0xee, 0xd3, 0x3d, 0x61, 0x65, 0x81, 0xe2, 0xb0, 0x0b, 0x12, 0x72, 0x1a, 0x48, 0xd8, 0xc4, + 0xe7, 0xbe, 0x76, 0xfa, 0x90, 0x89, 0x86, 0x67, 0x49, 0xba, 0x65, 0x1c, 0x4b, 0x37, 0xfc, 0x44, + 0xe8, 0x12, 0x6e, 0xf8, 0x89, 0x75, 0x0d, 0x96, 0xfa, 0xf2, 0x85, 0x4c, 0xe2, 0x7d, 0xa9, 0xb0, + 0x24, 0x8e, 0xd7, 0x19, 0x2c, 0x0f, 0x7c, 0x72, 0x22, 0xd7, 0xa1, 0x20, 0x3f, 0x6d, 0x54, 0x66, + 0xcc, 0xcb, 0x5f, 0x7f, 0xb3, 0xf2, 0xce, 0x00, 0x83, 0x5c, 0x44, 0xb6, 0xad, 0x76, 0xe0, 0xfa, + 0xac, 0x62, 0x8c, 0x64, 0x93, 0x8b, 0x66, 0xfe, 0xd7, 0x7f, 0xb8, 0x32, 0xb3, 0xee, 0xc0, 0x85, + 0xa1, 0xcf, 0x25, 0xe4, 0x1a, 0xe4, 0x8f, 0x99, 0xdf, 0x4c, 0xcd, 0x0c, 0x31, 0xe0, 0x22, 0xb9, + 0x0a, 0x39, 0xea, 0x9c, 0x55, 0x0c, 0xb3, 0xfa, 0xf5, 0x37, 0x2b, 0x97, 0x86, 0xbf, 0xb9, 0x38, + 0x67, 0xd2, 0xc4, 0xc6, 0x5f, 0x00, 0x16, 0x0f, 0x0f, 0xb7, 0xb6, 0x62, 0xcf, 0x6d, 0x31, 0xf2, + 0x4b, 0x03, 0xc8, 0xf0, 0xc3, 0x96, 0xdc, 0xc9, 0xae, 0xf1, 0xd1, 0x2f, 0x7f, 0xf3, 0xee, 0x94, + 0x52, 0x0a, 0x69, 0x7c, 0x0e, 0x73, 0x02, 0x1e, 0x93, 0x0f, 0x27, 0x7c, 0x25, 0x99, 0x6b, 0xe3, + 0x19, 0x95, 0xee, 0x06, 0x2c, 0xa4, 0x10, 0x93, 0xac, 0x67, 0x6e, 0xaf, 0x0f, 0x41, 0x9b, 0x1f, + 0x4d, 0xc4, 0xab, 0x8c, 0xfc, 0x14, 0xe6, 0x15, 0x72, 0x24, 0x37, 0xc6, 0xc8, 0xf5, 0x30, 0xac, + 0xb9, 0x3e, 0x09, 0x6b, 0xcf, 0x8d, 0x14, 0x21, 0x66, 0xba, 0x31, 0x80, 0x3f, 0x33, 0xdd, 0x18, + 0x82, 0x9c, 0x8d, 0xde, 0xbb, 0x32, 0xd3, 0xc8, 0x00, 0xde, 0xcc, 0x34, 0x32, 0x08, 0x3b, 0xc9, + 0x73, 0xc8, 0x23, 0xec, 0x24, 0x59, 0xed, 0x57, 0xc3, 0xa5, 0x66, 0xd6, 0x99, 0xe8, 0xc3, 0xab, + 0x3f, 0xc1, 0x6b, 0x4a, 0x7c, 0x42, 0xc8, 0xbe, 0xa0, 0xb4, 0x2f, 0x82, 0xe6, 0x8d, 0x09, 0x38, + 0x7b, 0xea, 0xd5, 0xf3, 0x7b, 0x6d, 0x82, 0xcf, 0x72, 0xe3, 0xd5, 0x0f, 0x7c, 0x00, 0x0c, 0xa1, + 0xa4, 0xa3, 0x0f, 0x62, 0x67, 0x88, 0x8e, 0x00, 0x6e, 0x66, 0x6d, 0x62, 0x7e, 0x65, 0xf0, 0x2b, + 0x7c, 0x7b, 0xf5, 0x23, 0x13, 0xb2, 0x91, 0x19, 0x8e, 0x91, 0x18, 0xc8, 0xbc, 0x3d, 0x95, 0x8c, + 0x32, 0xee, 0x48, 0xe4, 0xa3, 0xd0, 0x0d, 0xc9, 0xbe, 0xc8, 0xbb, 0x08, 0xc9, 0x9c, 0x90, 0x6f, + 0xcd, 0xb8, 0x65, 0xe0, 0x39, 0x43, 0xc4, 0x9b, 0xa9, 0x5b, 0x7b, 0x0a, 0x64, 0x9e, 0x33, 0x1d, + 0x3a, 0x6f, 0x95, 0xbe, 0x7d, 0x73, 0xc5, 0xf8, 0xfb, 0x9b, 0x2b, 0xc6, 0x3f, 0xdf, 0x5c, 0x31, + 0x4e, 0x0a, 0xe2, 0x7f, 0xce, 0xdb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x91, 0xe5, 0xca, + 0x70, 0x1e, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2414,6 +2806,8 @@ type LLBBridgeClient interface { ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) // apicaps:CapStatFile StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) + // apicaps:CapGatewayEvaluate + Evaluate(ctx context.Context, in *EvaluateRequest, opts ...grpc.CallOption) (*EvaluateResponse, error) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) // apicaps:CapFrontendInputs @@ -2478,6 +2872,15 @@ func (c *lLBBridgeClient) StatFile(ctx context.Context, in *StatFileRequest, opt return out, nil } +func (c *lLBBridgeClient) Evaluate(ctx context.Context, in *EvaluateRequest, opts ...grpc.CallOption) (*EvaluateResponse, error) { + out := new(EvaluateResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Evaluate", in, out, opts...) 
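// This stub follows the standard gRPC unary pattern: allocate the response
// message, Invoke the full method name, and hand the populated message back
// only when Invoke reports no error. A minimal sketch of exercising the new
// Evaluate capability through the public gateway client API (illustrative
// only; `gwc` is an assumed gwclient.Client and `def` an already-marshaled
// LLB definition):
//
//	res, err := gwc.Solve(ctx, gwclient.SolveRequest{
//		Definition: def.ToPB(),
//		Evaluate:   true, // fully evaluate the result before returning
//	})
//	if err != nil {
//		return err
//	}
//	_ = res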
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *lLBBridgeClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) { out := new(PongResponse) err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Ping", in, out, opts...) @@ -2575,6 +2978,8 @@ type LLBBridgeServer interface { ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error) // apicaps:CapStatFile StatFile(context.Context, *StatFileRequest) (*StatFileResponse, error) + // apicaps:CapGatewayEvaluate + Evaluate(context.Context, *EvaluateRequest) (*EvaluateResponse, error) Ping(context.Context, *PingRequest) (*PongResponse, error) Return(context.Context, *ReturnRequest) (*ReturnResponse, error) // apicaps:CapFrontendInputs @@ -2605,6 +3010,9 @@ func (*UnimplementedLLBBridgeServer) ReadDir(ctx context.Context, req *ReadDirRe func (*UnimplementedLLBBridgeServer) StatFile(ctx context.Context, req *StatFileRequest) (*StatFileResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method StatFile not implemented") } +func (*UnimplementedLLBBridgeServer) Evaluate(ctx context.Context, req *EvaluateRequest) (*EvaluateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Evaluate not implemented") +} func (*UnimplementedLLBBridgeServer) Ping(ctx context.Context, req *PingRequest) (*PongResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") } @@ -2721,6 +3129,24 @@ func _LLBBridge_StatFile_Handler(srv interface{}, ctx context.Context, dec func( return interceptor(ctx, in, info, handler) } +func _LLBBridge_Evaluate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(EvaluateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).Evaluate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Evaluate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Evaluate(ctx, req.(*EvaluateRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _LLBBridge_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(PingRequest) if err := dec(in); err != nil { @@ -2879,6 +3305,10 @@ var _LLBBridge_serviceDesc = grpc.ServiceDesc{ MethodName: "StatFile", Handler: _LLBBridge_StatFile_Handler, }, + { + MethodName: "Evaluate", + Handler: _LLBBridge_Evaluate_Handler, + }, { MethodName: "Ping", Handler: _LLBBridge_Ping_Handler, @@ -2939,6 +3369,32 @@ func (m *Result) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Attestations) > 0 { + for k := range m.Attestations { + v := m.Attestations[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } if len(m.Metadata) > 0 { for k := range m.Metadata { v := m.Metadata[k] @@ -3194,6 +3650,188 @@ func (m *RefMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
return len(dAtA) - i, nil } +func (m *Attestations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Attestations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Attestations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Attestation) > 0 { + for iNdEx := len(m.Attestation) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attestation[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Attestation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Attestation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Attestation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.InTotoSubjects) > 0 { + for iNdEx := len(m.InTotoSubjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InTotoSubjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.InTotoPredicateType) > 0 { + i -= len(m.InTotoPredicateType) + copy(dAtA[i:], m.InTotoPredicateType) + i = encodeVarintGateway(dAtA, i, uint64(len(m.InTotoPredicateType))) + i-- + dAtA[i] = 0x2a + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x22 + } + if m.Ref != nil { + { + size, err := m.Ref.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Metadata) > 0 { + for k := range m.Metadata { + v := m.Metadata[k] + baseI := i + if len(v) > 0 { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Kind != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *InTotoSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InTotoSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InTotoSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + 
copy(dAtA[i:], m.Name) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Digest) > 0 { + for iNdEx := len(m.Digest) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Digest[iNdEx]) + copy(dAtA[i:], m.Digest[iNdEx]) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if m.Kind != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3376,6 +4014,25 @@ func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, erro i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.StoreID) > 0 { + i -= len(m.StoreID) + copy(dAtA[i:], m.StoreID) + i = encodeVarintGateway(dAtA, i, uint64(len(m.StoreID))) + i-- + dAtA[i] = 0x3a + } + if len(m.SessionID) > 0 { + i -= len(m.SessionID) + copy(dAtA[i:], m.SessionID) + i = encodeVarintGateway(dAtA, i, uint64(len(m.SessionID))) + i-- + dAtA[i] = 0x32 + } + if m.ResolverType != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.ResolverType)) + i-- + dAtA[i] = 0x28 + } if len(m.LogName) > 0 { i -= len(m.LogName) copy(dAtA[i:], m.LogName) @@ -3477,6 +4134,20 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.SourcePolicies) > 0 { + for iNdEx := len(m.SourcePolicies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.SourcePolicies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } if m.Evaluate { i-- if m.Evaluate { @@ -3564,15 +4235,6 @@ func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - if len(m.ImportCacheRefsDeprecated) > 0 { - for iNdEx := len(m.ImportCacheRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ImportCacheRefsDeprecated[iNdEx]) - copy(dAtA[i:], m.ImportCacheRefsDeprecated[iNdEx]) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ImportCacheRefsDeprecated[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } if len(m.FrontendOpt) > 0 { for k := range m.FrontendOpt { v := m.FrontendOpt[k] @@ -4006,6 +4668,67 @@ func (m *StatFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *EvaluateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvaluateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EvaluateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EvaluateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EvaluateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
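// Every MarshalToSizedBuffer in this file fills dAtA from the back toward the
// front, so a parent message can prepend each child's length-delimited header
// without shifting bytes that are already in place. A self-contained sketch of
// the base-128 varint emitted by the encodeVarintGateway helper (illustrative
// only, not part of the generated API):
//
//	func appendUvarintSketch(buf []byte, x uint64) []byte {
//		for x >= 0x80 {
//			buf = append(buf, byte(x)|0x80) // low 7 bits, continuation bit set
//			x >>= 7
//		}
//		return append(buf, byte(x)) // final byte, continuation bit clear
//	}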
+func (m *EvaluateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + func (m *PingRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4626,20 +5349,20 @@ func (m *InitMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x20 } if len(m.Fds) > 0 { - dAtA26 := make([]byte, len(m.Fds)*10) - var j25 int + dAtA28 := make([]byte, len(m.Fds)*10) + var j27 int for _, num := range m.Fds { for num >= 1<<7 { - dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80) + dAtA28[j27] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j25++ + j27++ } - dAtA26[j25] = uint8(num) - j25++ + dAtA28[j27] = uint8(num) + j27++ } - i -= j25 - copy(dAtA[i:], dAtA26[:j25]) - i = encodeVarintGateway(dAtA, i, uint64(j25)) + i -= j27 + copy(dAtA[i:], dAtA28[:j27]) + i = encodeVarintGateway(dAtA, i, uint64(j27)) i-- dAtA[i] = 0x1a } @@ -4915,6 +5638,19 @@ func (m *Result) Size() (n int) { n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) } } + if len(m.Attestations) > 0 { + for k, v := range m.Attestations { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGateway(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5032,6 +5768,94 @@ func (m *RefMap) Size() (n int) { return n } +func (m *Attestations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attestation) > 0 { + for _, e := range m.Attestation { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Attestation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + sovGateway(uint64(m.Kind)) + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + l = 0 + if len(v) > 0 { + l = 1 + len(v) + sovGateway(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.Ref != nil { + l = m.Ref.Size() + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.InTotoPredicateType) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.InTotoSubjects) > 0 { + for _, e := range m.InTotoSubjects { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InTotoSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + sovGateway(uint64(m.Kind)) + } + if len(m.Digest) > 0 { + for _, s := range m.Digest { + l = len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *ReturnRequest) Size() (n int) { if m == nil { return 0 @@ -5123,6 +5947,17 @@ func (m *ResolveImageConfigRequest) Size() (n int) { if l > 0 { n += 1 + l + sovGateway(uint64(l)) } + if m.ResolverType != 0 { + n += 1 + sovGateway(uint64(m.ResolverType)) + } + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = 
len(m.StoreID) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5171,12 +6006,6 @@ func (m *SolveRequest) Size() (n int) { n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) } } - if len(m.ImportCacheRefsDeprecated) > 0 { - for _, s := range m.ImportCacheRefsDeprecated { - l = len(s) - n += 1 + l + sovGateway(uint64(l)) - } - } if m.AllowResultReturn { n += 2 } @@ -5212,6 +6041,12 @@ func (m *SolveRequest) Size() (n int) { if m.Evaluate { n += 2 } + if len(m.SourcePolicies) > 0 { + for _, e := range m.SourcePolicies { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5398,6 +6233,34 @@ func (m *StatFileResponse) Size() (n int) { return n } +func (m *EvaluateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EvaluateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *PingRequest) Size() (n int) { if m == nil { return 0 @@ -6109,6 +6972,135 @@ func (m *Result) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attestations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attestations == nil { + m.Attestations = make(map[string]*Attestations) + } + var mapkey string + var mapvalue *Attestations + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGateway + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + 
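// A negative postmsgIndex means iNdEx + mapmsglen overflowed the platform
// int, which only a corrupt or malicious length prefix can produce; it is
// rejected the same way as a length that runs past the buffer.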
return ErrInvalidLengthGateway + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Attestations{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attestations[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -6608,6 +7600,557 @@ func (m *RefMap) Unmarshal(dAtA []byte) error { } return nil } +func (m *Attestations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Attestations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Attestations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attestation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attestation = append(m.Attestation, &Attestation{}) + if err := m.Attestation[len(m.Attestation)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
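// Unknown fields land on this default branch in every Unmarshal in this
// file: skipGateway derives the byte length of the unrecognized field from
// its wire type, the raw bytes are retained in XXX_unrecognized so the
// message round-trips losslessly through peers built against a newer
// schema, and decoding resumes just past the skipped span.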
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Attestation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Attestation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Attestation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= AttestationKind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGateway + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGateway + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + 
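// Map entries are themselves tiny messages (key = field 1, value = field 2);
// any other field number inside an entry is skipped here without aborting
// the decode of the surrounding Metadata map.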
skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Ref == nil { + m.Ref = &Ref{} + } + if err := m.Ref.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InTotoPredicateType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InTotoPredicateType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InTotoSubjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InTotoSubjects = append(m.InTotoSubjects, &InTotoSubject{}) + if err := m.InTotoSubjects[len(m.InTotoSubjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
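// Cases 3-6 above decode the remaining Attestation fields declared in
// gateway.proto: ref (3), path (4), inTotoPredicateType (5), and
// inTotoSubjects (6).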
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InTotoSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InTotoSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InTotoSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= InTotoSubjectKind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = append(m.Digest, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
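// The Digest entries above are length-delimited strings cast directly to
// github.com/opencontainers/go-digest.Digest via the (gogoproto.customtype)
// option in gateway.proto; no validation happens during decode, so callers
// that care about well-formed digests must check them separately.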
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ReturnRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7174,6 +8717,89 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { } m.LogName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResolverType", wireType) + } + m.ResolverType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResolverType |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoreID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoreID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -7537,38 +9163,6 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } m.FrontendOpt[mapkey] = mapvalue iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportCacheRefsDeprecated", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImportCacheRefsDeprecated = append(m.ImportCacheRefsDeprecated, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field AllowResultReturn", wireType) @@ -7846,6 +9440,40 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } } m.Evaluate = bool(v != 0) + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourcePolicies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourcePolicies = append(m.SourcePolicies, &pb1.Policy{}) + if err := m.SourcePolicies[len(m.SourcePolicies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -8956,6 +10584,140 @@ func (m *StatFileResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *EvaluateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvaluateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvaluateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EvaluateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EvaluateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EvaluateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
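// EvaluateRequest carries only the Ref to evaluate, and EvaluateResponse is
// deliberately empty: success or failure travels in the gRPC status of the
// Evaluate call itself, while this loop still preserves any fields a newer
// peer might add, keeping the message forward compatible.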
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PingRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -9065,7 +10827,7 @@ func (m *PongResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FrontendAPICaps = append(m.FrontendAPICaps, pb1.APICap{}) + m.FrontendAPICaps = append(m.FrontendAPICaps, pb2.APICap{}) if err := m.FrontendAPICaps[len(m.FrontendAPICaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -9099,7 +10861,7 @@ func (m *PongResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LLBCaps = append(m.LLBCaps, pb1.APICap{}) + m.LLBCaps = append(m.LLBCaps, pb2.APICap{}) if err := m.LLBCaps[len(m.LLBCaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto index 31aaf3b20d..2e55f1db86 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto @@ -8,6 +8,7 @@ import "github.com/moby/buildkit/solver/pb/ops.proto"; import "github.com/moby/buildkit/api/types/worker.proto"; import "github.com/moby/buildkit/util/apicaps/pb/caps.proto"; import "github.com/tonistiigi/fsutil/types/stat.proto"; +import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto"; option (gogoproto.sizer_all) = true; @@ -25,6 +26,8 @@ service LLBBridge { rpc ReadDir(ReadDirRequest) returns (ReadDirResponse); // apicaps:CapStatFile rpc StatFile(StatFileRequest) returns (StatFileResponse); + // apicaps:CapGatewayEvaluate + rpc Evaluate(EvaluateRequest) returns (EvaluateResponse); rpc Ping(PingRequest) returns (PongResponse); rpc Return(ReturnRequest) returns (ReturnResponse); // apicaps:CapFrontendInputs @@ -48,6 +51,8 @@ message Result { RefMap refs = 4; } map metadata = 10; + // 11 was used during development and is reserved for old attestation format + map attestations = 12; } message RefMapDeprecated { @@ -63,6 +68,39 @@ message RefMap { map refs = 1; } +message Attestations { + repeated Attestation attestation = 1; +} + +message Attestation { + AttestationKind kind = 1; + map metadata = 2; + + Ref ref = 3; + string path = 4; + string inTotoPredicateType = 5; + repeated InTotoSubject inTotoSubjects = 6; +} + +enum AttestationKind { + option (gogoproto.goproto_enum_prefix) = false; + InToto = 0 [(gogoproto.enumvalue_customname) = "AttestationKindInToto"]; + Bundle = 1 [(gogoproto.enumvalue_customname) = "AttestationKindBundle"]; +} + +message InTotoSubject { + InTotoSubjectKind kind = 1; + + repeated string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string name = 3; +} + +enum InTotoSubjectKind { + option (gogoproto.goproto_enum_prefix) = false; + Self = 0 [(gogoproto.enumvalue_customname) = "InTotoSubjectKindSelf"]; + Raw = 1 [(gogoproto.enumvalue_customname) = "InTotoSubjectKindRaw"]; +} + message ReturnRequest { Result result = 1; google.rpc.Status error = 2; @@ -83,6 +121,9 @@ message ResolveImageConfigRequest { pb.Platform Platform = 2; string ResolveMode = 3; string LogName = 4; + int32 ResolverType = 5; + string SessionID = 6; + string StoreID = 7; } message ResolveImageConfigResponse { @@ -94,11 +135,7 @@ message SolveRequest { pb.Definition Definition = 1; string Frontend = 2; map FrontendOpt = 3; - // 
ImportCacheRefsDeprecated is deprecated in favor or the new Imports since BuildKit v0.4.0. - // When ImportCacheRefsDeprecated is set, the solver appends - // {.Type = "registry", .Attrs = {"ref": importCacheRef}} - // for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed) - repeated string ImportCacheRefsDeprecated = 4; + // 4 was removed in BuildKit v0.11.0. bool allowResultReturn = 5; bool allowResultArrayRef = 6; @@ -113,6 +150,8 @@ message SolveRequest { map FrontendInputs = 13; bool Evaluate = 14; + + repeated moby.buildkit.v1.sourcepolicy.Policy SourcePolicies = 15; } // CacheOptionsEntry corresponds to the control.CacheOptionsEntry @@ -165,6 +204,13 @@ message StatFileResponse { fsutil.types.Stat stat = 1; } +message EvaluateRequest { + string Ref = 1; +} + +message EvaluateResponse { +} + message PingRequest{ } message PongResponse{ diff --git a/vendor/github.com/moby/buildkit/frontend/result.go b/vendor/github.com/moby/buildkit/frontend/result.go deleted file mode 100644 index 5afc10c9f8..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/result.go +++ /dev/null @@ -1,25 +0,0 @@ -package frontend - -import ( - "github.com/moby/buildkit/solver" -) - -type Result struct { - Ref solver.ResultProxy - Refs map[string]solver.ResultProxy - Metadata map[string][]byte -} - -func (r *Result) EachRef(fn func(solver.ResultProxy) error) (err error) { - if r.Ref != nil { - err = fn(r.Ref) - } - for _, r := range r.Refs { - if r != nil { - if err1 := fn(r); err1 != nil && err == nil { - err = err1 - } - } - } - return err -} diff --git a/vendor/github.com/moby/buildkit/frontend/subrequests/describe.go b/vendor/github.com/moby/buildkit/frontend/subrequests/describe.go index cc8053ed24..832c9a839f 100644 --- a/vendor/github.com/moby/buildkit/frontend/subrequests/describe.go +++ b/vendor/github.com/moby/buildkit/frontend/subrequests/describe.go @@ -3,6 +3,10 @@ package subrequests import ( "context" "encoding/json" + "fmt" + "io" + "strings" + "text/tabwriter" "github.com/moby/buildkit/frontend/gateway/client" gwpb "github.com/moby/buildkit/frontend/gateway/pb" @@ -18,9 +22,8 @@ var SubrequestsDescribeDefinition = Request{ Type: TypeRPC, Description: "List available subrequest types", Metadata: []Named{ - { - Name: "result.json", - }, + {Name: "result.json"}, + {Name: "result.txt"}, }, } @@ -61,3 +64,18 @@ func Describe(ctx context.Context, c client.Client) ([]Request, error) { } return reqs, nil } + +func PrintDescribe(dt []byte, w io.Writer) error { + var d []Request + if err := json.Unmarshal(dt, &d); err != nil { + return err + } + + tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) + fmt.Fprintf(tw, "NAME\tVERSION\tDESCRIPTION\n") + + for _, r := range d { + fmt.Fprintf(tw, "%s\t%s\t%s\n", strings.TrimPrefix(r.Name, "frontend."), r.Version, r.Description) + } + return tw.Flush() +} diff --git a/vendor/github.com/moby/buildkit/frontend/subrequests/outline/outline.go b/vendor/github.com/moby/buildkit/frontend/subrequests/outline/outline.go new file mode 100644 index 0000000000..c0a376b0f9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/subrequests/outline/outline.go @@ -0,0 +1,146 @@ +package outline + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "text/tabwriter" + + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/solver/pb" +) + +const RequestSubrequestsOutline = "frontend.outline" + +var SubrequestsOutlineDefinition = subrequests.Request{ + Name: 
RequestSubrequestsOutline, + Version: "1.0.0", + Type: subrequests.TypeRPC, + Description: "List all parameters current build target supports", + Opts: []subrequests.Named{ + { + Name: "target", + Description: "Target build stage", + }, + }, + Metadata: []subrequests.Named{ + {Name: "result.json"}, + {Name: "result.txt"}, + }, +} + +type Outline struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Args []Arg `json:"args,omitempty"` + Secrets []Secret `json:"secrets,omitempty"` + SSH []SSH `json:"ssh,omitempty"` + Cache []CacheMount `json:"cache,omitempty"` + Sources [][]byte `json:"sources,omitempty"` +} + +func (o Outline) ToResult() (*client.Result, error) { + res := client.NewResult() + dt, err := json.MarshalIndent(o, "", " ") + if err != nil { + return nil, err + } + res.AddMeta("result.json", dt) + + b := bytes.NewBuffer(nil) + if err := PrintOutline(dt, b); err != nil { + return nil, err + } + res.AddMeta("result.txt", b.Bytes()) + + res.AddMeta("version", []byte(SubrequestsOutlineDefinition.Version)) + return res, nil +} + +type Arg struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Value string `json:"value,omitempty"` + Location *pb.Location `json:"location,omitempty"` +} + +type Secret struct { + Name string `json:"name"` + Required bool `json:"required,omitempty"` + Location *pb.Location `json:"location,omitempty"` +} + +type SSH struct { + Name string `json:"name"` + Required bool `json:"required,omitempty"` + Location *pb.Location `json:"location,omitempty"` +} + +type CacheMount struct { + ID string `json:"ID"` + Location *pb.Location `json:"location,omitempty"` +} + +func PrintOutline(dt []byte, w io.Writer) error { + var o Outline + + if err := json.Unmarshal(dt, &o); err != nil { + return err + } + + if o.Name != "" || o.Description != "" { + tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) + name := o.Name + if o.Name == "" { + name = "(default)" + } + fmt.Fprintf(tw, "TARGET:\t%s\n", name) + if o.Description != "" { + fmt.Fprintf(tw, "DESCRIPTION:\t%s\n", o.Description) + } + tw.Flush() + fmt.Fprintln(tw) + } + + if len(o.Args) > 0 { + tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) + fmt.Fprintf(tw, "BUILD ARG\tVALUE\tDESCRIPTION\n") + for _, a := range o.Args { + fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Name, a.Value, a.Description) + } + tw.Flush() + fmt.Fprintln(tw) + } + + if len(o.Secrets) > 0 { + tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) + fmt.Fprintf(tw, "SECRET\tREQUIRED\n") + for _, s := range o.Secrets { + b := "" + if s.Required { + b = "true" + } + fmt.Fprintf(tw, "%s\t%s\n", s.Name, b) + } + tw.Flush() + fmt.Fprintln(tw) + } + + if len(o.SSH) > 0 { + tw := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) + fmt.Fprintf(tw, "SSH\tREQUIRED\n") + for _, s := range o.SSH { + b := "" + if s.Required { + b = "true" + } + fmt.Fprintf(tw, "%s\t%s\n", s.Name, b) + } + tw.Flush() + fmt.Fprintln(tw) + } + + return nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/subrequests/targets/targets.go b/vendor/github.com/moby/buildkit/frontend/subrequests/targets/targets.go new file mode 100644 index 0000000000..bf00a3b2bc --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/subrequests/targets/targets.go @@ -0,0 +1,84 @@ +package targets + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "text/tabwriter" + + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/solver/pb" +) + +const RequestTargets = 
"frontend.targets" + +var SubrequestsTargetsDefinition = subrequests.Request{ + Name: RequestTargets, + Version: "1.0.0", + Type: subrequests.TypeRPC, + Description: "List all targets current build supports", + Opts: []subrequests.Named{}, + Metadata: []subrequests.Named{ + {Name: "result.json"}, + {Name: "result.txt"}, + }, +} + +type List struct { + Targets []Target `json:"targets"` + Sources [][]byte `json:"sources"` +} + +func (l List) ToResult() (*client.Result, error) { + res := client.NewResult() + dt, err := json.MarshalIndent(l, "", " ") + if err != nil { + return nil, err + } + res.AddMeta("result.json", dt) + + b := bytes.NewBuffer(nil) + if err := PrintTargets(dt, b); err != nil { + return nil, err + } + res.AddMeta("result.txt", b.Bytes()) + + res.AddMeta("version", []byte(SubrequestsTargetsDefinition.Version)) + return res, nil +} + +type Target struct { + Name string `json:"name,omitempty"` + Default bool `json:"default,omitempty"` + Description string `json:"description,omitempty"` + Base string `json:"base,omitempty"` + Platform string `json:"platform,omitempty"` + Location *pb.Location `json:"location,omitempty"` +} + +func PrintTargets(dt []byte, w io.Writer) error { + var l List + + if err := json.Unmarshal(dt, &l); err != nil { + return err + } + + tw := tabwriter.NewWriter(w, 0, 0, 1, ' ', 0) + fmt.Fprintf(tw, "TARGET\tDESCRIPTION\n") + + for _, t := range l.Targets { + name := t.Name + if name == "" && t.Default { + name = "(default)" + } else { + if t.Default { + name = fmt.Sprintf("%s (default)", name) + } + } + fmt.Fprintf(tw, "%s\t%s\n", name, t.Description) + } + + return tw.Flush() +} diff --git a/vendor/github.com/moby/buildkit/identity/randomid.go b/vendor/github.com/moby/buildkit/identity/randomid.go index 0eb13527aa..2b8796f095 100644 --- a/vendor/github.com/moby/buildkit/identity/randomid.go +++ b/vendor/github.com/moby/buildkit/identity/randomid.go @@ -2,9 +2,10 @@ package identity import ( cryptorand "crypto/rand" - "fmt" "io" "math/big" + + "github.com/pkg/errors" ) var ( @@ -45,7 +46,7 @@ func NewID() string { var p [randomIDEntropyBytes]byte if _, err := io.ReadFull(idReader, p[:]); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) + panic(errors.Wrap(err, "failed to read random bytes: %v")) } p[0] |= 0x80 // set high bit to avoid the need for padding diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.go b/vendor/github.com/moby/buildkit/session/auth/auth.go index 85e6f68053..232022ad23 100644 --- a/vendor/github.com/moby/buildkit/session/auth/auth.go +++ b/vendor/github.com/moby/buildkit/session/auth/auth.go @@ -2,8 +2,8 @@ package auth import ( "context" + "crypto/rand" "crypto/subtle" - "math/rand" "sync" "github.com/moby/buildkit/session" diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go index ae3f29f86c..e313542629 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.go +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.go @@ -27,27 +27,35 @@ const ( ) type fsSyncProvider struct { - dirs map[string]SyncedDir + dirs DirSource p progressCb doneCh chan error } type SyncedDir struct { - Name string Dir string Excludes []string - Map func(string, *fstypes.Stat) bool + Map func(string, *fstypes.Stat) fsutil.MapResult +} + +type DirSource interface { + LookupDir(string) (SyncedDir, bool) +} + +type StaticDirSource map[string]SyncedDir + +var _ DirSource = StaticDirSource{} + +func (dirs StaticDirSource) 
LookupDir(name string) (SyncedDir, bool) { + dir, found := dirs[name] + return dir, found } // NewFSSyncProvider creates a new provider for sending files from client -func NewFSSyncProvider(dirs []SyncedDir) session.Attachable { - p := &fsSyncProvider{ - dirs: map[string]SyncedDir{}, +func NewFSSyncProvider(dirs DirSource) session.Attachable { + return &fsSyncProvider{ + dirs: dirs, } - for _, d := range dirs { - p.dirs[d.Name] = d - } - return p } func (sp *fsSyncProvider) Register(server *grpc.Server) { @@ -81,7 +89,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr dirName = name[0] } - dir, ok := sp.dirs[dirName] + dir, ok := sp.dirs.LookupDir(dirName) if !ok { return InvalidSessionError{status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName)} } diff --git a/vendor/github.com/moby/buildkit/session/manager.go b/vendor/github.com/moby/buildkit/session/manager.go index edac93063c..2678e6738d 100644 --- a/vendor/github.com/moby/buildkit/session/manager.go +++ b/vendor/github.com/moby/buildkit/session/manager.go @@ -160,12 +160,10 @@ func (sm *Manager) Get(ctx context.Context, id string, noWait bool) (Caller, err defer cancel() go func() { - select { - case <-ctx.Done(): - sm.mu.Lock() - sm.updateCondition.Broadcast() - sm.mu.Unlock() - } + <-ctx.Done() + sm.mu.Lock() + sm.updateCondition.Broadcast() + sm.mu.Unlock() }() var c *client diff --git a/vendor/github.com/moby/buildkit/session/sshforward/copy.go b/vendor/github.com/moby/buildkit/session/sshforward/copy.go index 6db4148949..a4a065b46e 100644 --- a/vendor/github.com/moby/buildkit/session/sshforward/copy.go +++ b/vendor/github.com/moby/buildkit/session/sshforward/copy.go @@ -1,10 +1,10 @@ package sshforward import ( - io "io" + "context" + "io" "github.com/pkg/errors" - context "golang.org/x/net/context" "golang.org/x/sync/errgroup" ) @@ -14,16 +14,24 @@ type Stream interface { } func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStream func() error) error { + defer conn.Close() g, ctx := errgroup.WithContext(ctx) g.Go(func() (retErr error) { p := &BytesMessage{} for { if err := stream.RecvMsg(p); err != nil { - conn.Close() if err == io.EOF { + // indicates client performed CloseSend, but they may still be + // reading data + if conn, ok := conn.(interface { + CloseWrite() error + }); ok { + conn.CloseWrite() + } return nil } + conn.Close() return errors.WithStack(err) } select { diff --git a/vendor/github.com/moby/buildkit/session/sshforward/ssh.go b/vendor/github.com/moby/buildkit/session/sshforward/ssh.go index a7a4c2e228..a808fcb1f0 100644 --- a/vendor/github.com/moby/buildkit/session/sshforward/ssh.go +++ b/vendor/github.com/moby/buildkit/session/sshforward/ssh.go @@ -1,14 +1,13 @@ package sshforward import ( - "io/ioutil" + "context" "net" "os" "path/filepath" "github.com/moby/buildkit/session" "github.com/pkg/errors" - context "golang.org/x/net/context" "golang.org/x/sync/errgroup" "google.golang.org/grpc/metadata" ) @@ -64,7 +63,7 @@ type SocketOpt struct { } func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockPath string, closer func() error, err error) { - dir, err := ioutil.TempDir("", ".buildkit-ssh-sock") + dir, err := os.MkdirTemp("", ".buildkit-ssh-sock") if err != nil { return "", nil, errors.WithStack(err) } diff --git a/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go b/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go index 5010519365..74cf40ab7b 100644 --- 
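// Reviewer sketch, not part of the patch: how a caller adapts to the new
// filesync DirSource API above. SyncedDir has lost its Name field; the
// StaticDirSource map key now carries the name. The "context" and
// "dockerfile" keys and the helper below are illustrative assumptions.
package example

import (
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
)

func sourceAttachable(contextDir, dockerfileDir string) session.Attachable {
	dirs := filesync.StaticDirSource{
		"context":    {Dir: contextDir},
		"dockerfile": {Dir: dockerfileDir},
	}
	// NewFSSyncProvider now accepts any DirSource, so a dynamic lookup can
	// be supplied instead of a fixed map.
	return filesync.NewFSSyncProvider(dirs)
}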
a/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go +++ b/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go @@ -14,9 +14,9 @@ import ( "github.com/containerd/containerd/leases" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/snapshots/overlay/overlayutils" "github.com/containerd/continuity/fs" "github.com/containerd/continuity/sysx" - "github.com/containerd/stargz-snapshotter/snapshot/overlayutils" "github.com/hashicorp/go-multierror" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/util/bklog" diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go index ef73e263fc..27cff3ebdf 100644 --- a/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go +++ b/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go @@ -4,7 +4,6 @@ package snapshot import ( - "io/ioutil" "os" "syscall" @@ -38,7 +37,7 @@ func (lm *localMounter) Mount() (string, error) { } } - dir, err := ioutil.TempDir("", "buildkit-mount") + dir, err := os.MkdirTemp("", "buildkit-mount") if err != nil { return "", errors.Wrap(err, "failed to create temp dir") } diff --git a/vendor/github.com/moby/buildkit/solver/cachekey.go b/vendor/github.com/moby/buildkit/solver/cachekey.go index 3749af0ab3..398368716a 100644 --- a/vendor/github.com/moby/buildkit/solver/cachekey.go +++ b/vendor/github.com/moby/buildkit/solver/cachekey.go @@ -7,10 +7,11 @@ import ( ) // NewCacheKey creates a new cache key for a specific output index -func NewCacheKey(dgst digest.Digest, output Index) *CacheKey { +func NewCacheKey(dgst, vtx digest.Digest, output Index) *CacheKey { return &CacheKey{ ID: rootKey(dgst, output).String(), digest: dgst, + vtx: vtx, output: output, ids: map[*cacheManager]string{}, } @@ -29,6 +30,7 @@ type CacheKey struct { ID string deps [][]CacheKeyWithSelector // only [][]*inMemoryCacheKey digest digest.Digest + vtx digest.Digest output Index ids map[*cacheManager]string @@ -56,6 +58,7 @@ func (ck *CacheKey) clone() *CacheKey { nk := &CacheKey{ ID: ck.ID, digest: ck.digest, + vtx: ck.vtx, output: ck.output, ids: map[*cacheManager]string{}, } diff --git a/vendor/github.com/moby/buildkit/solver/cacheopts.go b/vendor/github.com/moby/buildkit/solver/cacheopts.go index d5821b4e91..4b661471ed 100644 --- a/vendor/github.com/moby/buildkit/solver/cacheopts.go +++ b/vendor/github.com/moby/buildkit/solver/cacheopts.go @@ -4,12 +4,15 @@ import ( "context" "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/progress" digest "github.com/opencontainers/go-digest" ) type CacheOpts map[interface{}]interface{} +type progressKey struct{} + type cacheOptGetterKey struct{} func CacheOptGetterOf(ctx context.Context) func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{} { @@ -91,3 +94,15 @@ func walkAncestors(ctx context.Context, start *state, f func(*state) bool) { } } } + +func ProgressControllerFromContext(ctx context.Context) progress.Controller { + var pg progress.Controller + if optGetter := CacheOptGetterOf(ctx); optGetter != nil { + if kv := optGetter(false, progressKey{}); kv != nil { + if v, ok := kv[progressKey{}].(progress.Controller); ok { + pg = v + } + } + } + return pg +} diff --git a/vendor/github.com/moby/buildkit/solver/edge.go b/vendor/github.com/moby/buildkit/solver/edge.go index 8504d9f657..5e3068010f 100644 --- a/vendor/github.com/moby/buildkit/solver/edge.go +++ 
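// Reviewer sketch, not part of the patch: ProgressControllerFromContext
// (added in cacheopts.go above) replaces the per-op pg fields that the
// merge.go and diff.go changes below delete; the solver now stashes a
// progress.Controller under progressKey in CacheOpts (see the CacheMap
// wiring in jobs.go). The function name execStep is a hypothetical
// placeholder.
package example

import (
	"context"

	"github.com/moby/buildkit/solver"
)

func execStep(ctx context.Context) {
	if pg := solver.ProgressControllerFromContext(ctx); pg != nil {
		_ = pg // hand pg to CacheManager().Merge/Diff, as the ops below do
	}
}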
b/vendor/github.com/moby/buildkit/solver/edge.go @@ -136,11 +136,11 @@ func (e *edge) release() { // commitOptions returns parameters for the op execution func (e *edge) commitOptions() ([]*CacheKey, []CachedResult) { - k := NewCacheKey(e.cacheMap.Digest, e.edge.Index) + k := NewCacheKey(e.cacheMap.Digest, e.edge.Vertex.Digest(), e.edge.Index) if len(e.deps) == 0 { keys := make([]*CacheKey, 0, len(e.cacheMapDigests)) for _, dgst := range e.cacheMapDigests { - keys = append(keys, NewCacheKey(dgst, e.edge.Index)) + keys = append(keys, NewCacheKey(dgst, e.edge.Vertex.Digest(), e.edge.Index)) } return keys, nil } @@ -201,6 +201,7 @@ func (e *edge) probeCache(d *dep, depKeys []CacheKeyWithSelector) bool { } found := false for _, k := range keys { + k.vtx = e.edge.Vertex.Digest() if _, ok := d.keyMap[k.ID]; !ok { d.keyMap[k.ID] = k found = true @@ -275,7 +276,7 @@ func (e *edge) currentIndexKey() *CacheKey { } } - k := NewCacheKey(e.cacheMap.Digest, e.edge.Index) + k := NewCacheKey(e.cacheMap.Digest, e.edge.Vertex.Digest(), e.edge.Index) k.deps = keys return k @@ -317,10 +318,10 @@ func (e *edge) skipPhase2FastCache(dep *dep) bool { // previous calls. // To avoid deadlocks and resource leaks this function needs to follow // following rules: -// 1) this function needs to return unclosed outgoing requests if some incoming -// requests were not completed -// 2) this function may not return outgoing requests if it has completed all -// incoming requests +// 1. this function needs to return unclosed outgoing requests if some incoming +// requests were not completed +// 2. this function may not return outgoing requests if it has completed all +// incoming requests func (e *edge) unpark(incoming []pipe.Sender, updates, allPipes []pipe.Receiver, f *pipeFactory) { // process all incoming changes depChanged := false @@ -403,6 +404,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { bklog.G(context.TODO()).Error(errors.Wrap(err, "invalid query response")) // make the build fail for this error } else { for _, k := range keys { + k.vtx = e.edge.Vertex.Digest() records, err := e.op.Cache().Records(k) if err != nil { bklog.G(context.TODO()).Errorf("error receiving cache records: %v", err) @@ -508,7 +510,7 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { } else if !dep.slowCacheComplete { dgst := upt.Status().Value.(digest.Digest) if e.cacheMap.Deps[int(dep.index)].ComputeDigestFunc != nil && dgst != "" { - k := NewCacheKey(dgst, -1) + k := NewCacheKey(dgst, "", -1) dep.slowCacheKey = &ExportableCacheKey{CacheKey: k, Exporter: &exporter{k: k}} slowKeyExp := CacheKeyWithSelector{CacheKey: *dep.slowCacheKey} defKeys := make([]CacheKeyWithSelector, 0, len(dep.result.CacheKeys())) diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go b/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go index 5da34b6e59..e02cfb9696 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go @@ -186,6 +186,7 @@ type Solve struct { MountIDs []string `protobuf:"bytes,2,rep,name=mountIDs,proto3" json:"mountIDs,omitempty"` Op *pb.Op `protobuf:"bytes,3,opt,name=op,proto3" json:"op,omitempty"` // Types that are valid to be assigned to Subject: + // // *Solve_File // *Solve_Cache Subject isSolve_Subject `protobuf_oneof:"subject"` diff --git a/vendor/github.com/moby/buildkit/solver/exporter.go b/vendor/github.com/moby/buildkit/solver/exporter.go index 67ede42223..78ce77c2d2 100644 --- 
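// Reviewer sketch, not part of the patch: NewCacheKey (cachekey.go above)
// now also takes the digest of the originating vertex, which edge.go
// threads through so cache exporters can attribute records to a vertex;
// slow-cache keys pass an empty vtx digest and index -1. The digest
// inputs below are illustrative only.
package example

import (
	"github.com/moby/buildkit/solver"
	digest "github.com/opencontainers/go-digest"
)

func keyForVertex() *solver.CacheKey {
	cacheMapDgst := digest.FromString("example-cachemap")
	vtxDgst := digest.FromString("example-vertex")
	return solver.NewCacheKey(cacheMapDgst, vtxDgst, solver.Index(0))
}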
a/vendor/github.com/moby/buildkit/solver/exporter.go +++ b/vendor/github.com/moby/buildkit/solver/exporter.go @@ -96,12 +96,17 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach addRecord = *e.override } - if e.record == nil && len(e.k.Deps()) > 0 { + exportRecord := opt.ExportRoots + if len(e.k.Deps()) > 0 { + exportRecord = true + } + + if e.record == nil && exportRecord { e.record = getBestResult(e.records) } var remote *Remote - if v := e.record; v != nil && len(e.k.Deps()) > 0 && addRecord { + if v := e.record; v != nil && exportRecord && addRecord { var variants []CacheExporterRecord cm := v.cacheManager @@ -121,7 +126,7 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach if opt.CompressionOpt != nil { for _, r := range remotes { // record all remaining remotes as well rec := t.Add(recKey) - rec.AddResult(v.CreatedAt, r) + rec.AddResult(e.k.vtx, int(e.k.output), v.CreatedAt, r) variants = append(variants, rec) } } @@ -142,7 +147,7 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach if opt.CompressionOpt != nil { for _, r := range remotes { // record all remaining remotes as well rec := t.Add(recKey) - rec.AddResult(v.CreatedAt, r) + rec.AddResult(e.k.vtx, int(e.k.output), v.CreatedAt, r) variants = append(variants, rec) } } @@ -150,7 +155,7 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach if remote != nil { for _, rec := range allRec { - rec.AddResult(v.CreatedAt, remote) + rec.AddResult(e.k.vtx, int(e.k.output), v.CreatedAt, remote) } } allRec = append(allRec, variants...) diff --git a/vendor/github.com/moby/buildkit/solver/jobs.go b/vendor/github.com/moby/buildkit/solver/jobs.go index 25cb93d599..465534d934 100644 --- a/vendor/github.com/moby/buildkit/solver/jobs.go +++ b/vendor/github.com/moby/buildkit/solver/jobs.go @@ -12,9 +12,11 @@ import ( "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/util/tracing" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" ) @@ -22,7 +24,7 @@ import ( type ResolveOpFunc func(Vertex, Builder) (Op, error) type Builder interface { - Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) + Build(ctx context.Context, e Edge) (CachedResultWithProvenance, error) InContext(ctx context.Context, f func(ctx context.Context, g session.Group) error) error EachValue(ctx context.Context, key string, fn func(interface{}) error) error } @@ -197,16 +199,16 @@ type subBuilder struct { exporters []ExportableCacheKey } -func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) { +func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResultWithProvenance, error) { // TODO(@crazy-max): Handle BuildInfo from subbuild res, err := sb.solver.subBuild(ctx, e, sb.vtx) if err != nil { - return nil, nil, err + return nil, err } sb.mu.Lock() sb.exporters = append(sb.exporters, res.CacheKeys()[0]) // all keys already have full export chain sb.mu.Unlock() - return res, nil, nil + return &withProvenance{CachedResult: res}, nil } func (sb *subBuilder) InContext(ctx context.Context, f func(context.Context, session.Group) error) error { @@ -229,15 +231,18 @@ func (sb *subBuilder) EachValue(ctx context.Context, key string, fn func(interfa } type Job 
struct { - list *Solver - pr *progress.MultiReader - pw progress.Writer - span trace.Span - values sync.Map - id string + list *Solver + pr *progress.MultiReader + pw progress.Writer + span trace.Span + values sync.Map + id string + startedTime time.Time + completedTime time.Time progressCloser func() SessionID string + uniqueID string // unique ID is used for provenance. We use a different field that client can't control } type SolverOpt struct { @@ -447,6 +452,8 @@ func (jl *Solver) NewJob(id string) (*Job, error) { progressCloser: progressCloser, span: span, id: id, + startedTime: time.Now(), + uniqueID: identity.NewID(), } jl.jobs[id] = j @@ -496,48 +503,70 @@ func (jl *Solver) deleteIfUnreferenced(k digest.Digest, st *state) { } } -func (j *Job) Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) { +func (j *Job) Build(ctx context.Context, e Edge) (CachedResultWithProvenance, error) { if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { j.span = span } v, err := j.list.load(e.Vertex, nil, j) if err != nil { - return nil, nil, err + return nil, err } e.Vertex = v res, err := j.list.s.build(ctx, e) if err != nil { - return nil, nil, err + return nil, err } j.list.mu.Lock() defer j.list.mu.Unlock() - return res, j.walkBuildSources(ctx, e, make(BuildSources)), nil + return &withProvenance{CachedResult: res, j: j, e: e}, nil } -func (j *Job) walkBuildSources(ctx context.Context, e Edge, bsrc BuildSources) BuildSources { - for _, inp := range e.Vertex.Inputs() { - if st, ok := j.list.actives[inp.Vertex.Digest()]; ok { - st.mu.Lock() - for _, cacheRes := range st.op.cacheRes { - for key, val := range cacheRes.BuildSources { - if _, ok := bsrc[key]; !ok { - bsrc[key] = val - } - } +type withProvenance struct { + CachedResult + j *Job + e Edge +} + +func (wp *withProvenance) WalkProvenance(ctx context.Context, f func(ProvenanceProvider) error) error { + if wp.j == nil { + return nil + } + m := map[digest.Digest]struct{}{} + return wp.j.walkProvenance(ctx, wp.e, f, m) +} + +func (j *Job) walkProvenance(ctx context.Context, e Edge, f func(ProvenanceProvider) error, visited map[digest.Digest]struct{}) error { + if _, ok := visited[e.Vertex.Digest()]; ok { + return nil + } + visited[e.Vertex.Digest()] = struct{}{} + if st, ok := j.list.actives[e.Vertex.Digest()]; ok { + st.mu.Lock() + if wp, ok := st.op.op.(ProvenanceProvider); ok { + if err := f(wp); err != nil { + st.mu.Unlock() + return err } - st.mu.Unlock() - bsrc = j.walkBuildSources(ctx, inp, bsrc) + } + st.mu.Unlock() + } + for _, inp := range e.Vertex.Inputs() { + if err := j.walkProvenance(ctx, inp, f, visited); err != nil { + return err } } - return bsrc + return nil +} + +func (j *Job) CloseProgress() { + j.progressCloser() + j.pw.Close() } func (j *Job) Discard() error { - defer j.progressCloser() - j.list.mu.Lock() defer j.list.mu.Unlock() @@ -549,9 +578,7 @@ func (j *Job) Discard() error { delete(st.jobs, j) j.list.deleteIfUnreferenced(k, st) } - if _, ok := st.allPw[j.pw]; ok { - delete(st.allPw, j.pw) - } + delete(st.allPw, j.pw) st.mu.Unlock() } @@ -565,6 +592,21 @@ func (j *Job) Discard() error { return nil } +func (j *Job) StartedTime() time.Time { + return j.startedTime +} + +func (j *Job) RegisterCompleteTime() time.Time { + if j.completedTime.IsZero() { + j.completedTime = time.Now() + } + return j.completedTime +} + +func (j *Job) UniqueID() string { + return j.uniqueID +} + func (j *Job) InContext(ctx context.Context, f func(context.Context, session.Group) error) error { return 
f(progress.WithProgress(ctx, j.pw), session.NewGroup(j.SessionID)) } @@ -620,8 +662,9 @@ type sharedOp struct { subBuilder *subBuilder err error - execRes *execRes - execErr error + execRes *execRes + execDone bool + execErr error cacheRes []*CacheMap cacheDone bool @@ -646,7 +689,7 @@ func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) (Result, err ctx = trace.ContextWithSpan(ctx, s.st.mspan) } // no cache hit. start evaluating the node - span, ctx := tracing.StartSpan(ctx, "load cache: "+s.st.vtx.Name()) + span, ctx := tracing.StartSpan(ctx, "load cache: "+s.st.vtx.Name(), trace.WithAttributes(attribute.String("vertex", s.st.vtx.Digest().String()))) notifyCompleted := notifyStarted(ctx, &s.st.clientVertex, true) res, err := s.Cache().Load(withAncestorCacheOpts(ctx, s.st), rec) tracing.FinishWithError(span, err) @@ -758,7 +801,7 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, ctx = withAncestorCacheOpts(ctx, s.st) if len(s.st.vtx.Inputs()) == 0 { // no cache hit. start evaluating the node - span, ctx := tracing.StartSpan(ctx, "cache request: "+s.st.vtx.Name()) + span, ctx := tracing.StartSpan(ctx, "cache request: "+s.st.vtx.Name(), trace.WithAttributes(attribute.String("vertex", s.st.vtx.Digest().String()))) notifyCompleted := notifyStarted(ctx, &s.st.clientVertex, false) defer func() { tracing.FinishWithError(span, retErr) @@ -780,6 +823,15 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, } if complete { if err == nil { + if res.Opts == nil { + res.Opts = CacheOpts(make(map[interface{}]interface{})) + } + res.Opts[progressKey{}] = &controller.Controller{ + WriterFactory: progress.FromContext(ctx), + Digest: s.st.vtx.Digest(), + Name: s.st.vtx.Name(), + ProgressGroup: s.st.vtx.Options().ProgressGroup, + } s.cacheRes = append(s.cacheRes, res) s.cacheDone = done } @@ -809,10 +861,10 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, } flightControlKey := "exec" res, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (ret interface{}, retErr error) { - if s.execErr != nil { - return nil, s.execErr - } - if s.execRes != nil { + if s.execDone { + if s.execErr != nil { + return nil, s.execErr + } return s.execRes, nil } release, err := op.Acquire(ctx) @@ -828,7 +880,7 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, ctx = withAncestorCacheOpts(ctx, s.st) // no cache hit. 
start evaluating the node - span, ctx := tracing.StartSpan(ctx, s.st.vtx.Name()) + span, ctx := tracing.StartSpan(ctx, s.st.vtx.Name(), trace.WithAttributes(attribute.String("vertex", s.st.vtx.Digest().String()))) notifyCompleted := notifyStarted(ctx, &s.st.clientVertex, false) defer func() { tracing.FinishWithError(span, retErr) @@ -849,6 +901,7 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, } } if complete { + s.execDone = true if res != nil { var subExporters []ExportableCacheKey s.subBuilder.mu.Lock() diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go index 8507280a10..185fe81f06 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go @@ -11,7 +11,6 @@ import ( "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" gw "github.com/moby/buildkit/frontend/gateway/client" "github.com/moby/buildkit/identity" @@ -19,9 +18,11 @@ import ( "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/errdefs" llberrdefs "github.com/moby/buildkit/solver/llbsolver/errdefs" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/sourcepolicy" + spb "github.com/moby/buildkit/sourcepolicy/pb" "github.com/moby/buildkit/util/bklog" - "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/worker" @@ -63,20 +64,35 @@ func (b *llbBridge) Warn(ctx context.Context, dgst digest.Digest, msg string, op }) } -func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry) (solver.CachedResult, solver.BuildSources, error) { +func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry, pol []*spb.Policy) (solver.CachedResultWithProvenance, error) { w, err := b.resolveWorker() if err != nil { - return nil, nil, err + return nil, err } ent, err := loadEntitlements(b.builder) if err != nil { - return nil, nil, err + return nil, err + } + srcPol, err := loadSourcePolicy(b.builder) + if err != nil { + return nil, err + } + var polEngine SourcePolicyEvaluator + if srcPol != nil || len(pol) > 0 { + if srcPol != nil { + pol = append([]*spb.Policy{srcPol}, pol...) 
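// Reviewer note, not part of the patch: the builder's own source policy is
// prepended here so it is ordered ahead of any policies supplied with the
// request. The err check after sourcepolicy.NewEngine below is a leftover:
// NewEngine returns only the engine, so err is still nil at that point.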
+ } + + polEngine = sourcepolicy.NewEngine(pol) + if err != nil { + return nil, err + } } var cms []solver.CacheManager for _, im := range cacheImports { cmID, err := cmKey(im) if err != nil { - return nil, nil, err + return nil, err } b.cmsMu.Lock() var cm solver.CacheManager @@ -91,7 +107,7 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp } ci, desc, err := resolveCI(ctx, g, im.Attrs) if err != nil { - return err + return errors.Wrapf(err, "failed to configure %v cache importer", im.Type) } cmNew, err = ci.Resolve(ctx, desc, cmID, w) return err @@ -111,9 +127,9 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp } dpc := &detectPrunedCacheID{} - edge, err := Load(def, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), NormalizeRuntimePlatforms(), WithValidateCaps()) + edge, err := Load(ctx, def, polEngine, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), NormalizeRuntimePlatforms(), WithValidateCaps()) if err != nil { - return nil, nil, errors.Wrap(err, "failed to load LLB") + return nil, errors.Wrap(err, "failed to load LLB") } if len(dpc.ids) > 0 { @@ -124,107 +140,44 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp if err := b.eachWorker(func(w worker.Worker) error { return w.PruneCacheMounts(ctx, ids) }); err != nil { - return nil, nil, err + return nil, err } } - res, bi, err := b.builder.Build(ctx, edge) + res, err := b.builder.Build(ctx, edge) if err != nil { - return nil, nil, err + return nil, err } - return res, bi, nil -} - -func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid string) (res *frontend.Result, err error) { - if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" { - return nil, errors.New("cannot solve with both Definition and Frontend specified") - } - - if req.Definition != nil && req.Definition.Def != nil { - res = &frontend.Result{Ref: newResultProxy(b, req)} - if req.Evaluate { - _, err = res.Ref.Result(ctx) - } - } else if req.Frontend != "" { - f, ok := b.frontends[req.Frontend] - if !ok { - return nil, errors.Errorf("invalid frontend: %s", req.Frontend) - } - res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs, sid, b.sm) - if err != nil { - return nil, err - } - } else { - return &frontend.Result{}, nil - } - - if len(res.Refs) > 0 { - for p := range res.Refs { - dtbi, err := buildinfo.GetMetadata(res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p), req.Frontend, req.FrontendOpt) - if err != nil { - return nil, err - } - if dtbi != nil && len(dtbi) > 0 { - if res.Metadata == nil { - res.Metadata = make(map[string][]byte) - } - res.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p)] = dtbi - } - } - } else { - dtbi, err := buildinfo.GetMetadata(res.Metadata, exptypes.ExporterBuildInfo, req.Frontend, req.FrontendOpt) - if err != nil { - return nil, err - } - if dtbi != nil && len(dtbi) > 0 { - if res.Metadata == nil { - res.Metadata = make(map[string][]byte) - } - res.Metadata[exptypes.ExporterBuildInfo] = dtbi - } - } - - return + return res, nil } type resultProxy struct { - cb func(context.Context) (solver.CachedResult, solver.BuildSources, error) - def *pb.Definition + id string + b *provenanceBridge + req frontend.SolveRequest g flightcontrol.Group mu sync.Mutex released bool v solver.CachedResult - bsrc solver.BuildSources err error errResults []solver.Result + provenance *provenance.Capture } -func newResultProxy(b *llbBridge, req 
frontend.SolveRequest) *resultProxy { - rp := &resultProxy{ - def: req.Definition, - } - rp.cb = func(ctx context.Context) (solver.CachedResult, solver.BuildSources, error) { - res, bsrc, err := b.loadResult(ctx, req.Definition, req.CacheImports) - var ee *llberrdefs.ExecError - if errors.As(err, &ee) { - ee.EachRef(func(res solver.Result) error { - rp.errResults = append(rp.errResults, res) - return nil - }) - // acquire ownership so ExecError finalizer doesn't attempt to release as well - ee.OwnerBorrowed = true - } - return res, bsrc, err - } - return rp +func newResultProxy(b *provenanceBridge, req frontend.SolveRequest) *resultProxy { + return &resultProxy{req: req, b: b, id: identity.NewID()} +} + +func (rp *resultProxy) ID() string { + return rp.id } func (rp *resultProxy) Definition() *pb.Definition { - return rp.def + return rp.req.Definition } -func (rp *resultProxy) BuildSources() solver.BuildSources { - return rp.bsrc +func (rp *resultProxy) Provenance() interface{} { + return rp.provenance } func (rp *resultProxy) Release(ctx context.Context) (err error) { @@ -255,12 +208,12 @@ func (rp *resultProxy) wrapError(err error) error { } var ve *errdefs.VertexError if errors.As(err, &ve) { - if rp.def.Source != nil { - locs, ok := rp.def.Source.Locations[string(ve.Digest)] + if rp.req.Definition.Source != nil { + locs, ok := rp.req.Definition.Source.Locations[string(ve.Digest)] if ok { for _, loc := range locs.Locations { err = errdefs.WithSource(err, errdefs.Source{ - Info: rp.def.Source.Infos[loc.SourceIndex], + Info: rp.req.Definition.Source.Infos[loc.SourceIndex], Ranges: loc.Ranges, }) } @@ -270,6 +223,20 @@ func (rp *resultProxy) wrapError(err error) error { return err } +func (rp *resultProxy) loadResult(ctx context.Context) (solver.CachedResultWithProvenance, error) { + res, err := rp.b.loadResult(ctx, rp.req.Definition, rp.req.CacheImports, rp.req.SourcePolicies) + var ee *llberrdefs.ExecError + if errors.As(err, &ee) { + ee.EachRef(func(res solver.Result) error { + rp.errResults = append(rp.errResults, res) + return nil + }) + // acquire ownership so ExecError finalizer doesn't attempt to release as well + ee.OwnerBorrowed = true + } + return res, err +} + func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err error) { defer func() { err = rp.wrapError(err) @@ -285,7 +252,7 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err return rp.v, rp.err } rp.mu.Unlock() - v, bsrc, err := rp.cb(ctx) + v, err := rp.loadResult(ctx) if err != nil { select { case <-ctx.Done(): @@ -304,8 +271,16 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err return nil, errors.Errorf("evaluating released result") } rp.v = v - rp.bsrc = bsrc rp.err = err + if err == nil { + capture, err := captureProvenance(ctx, v) + if err != nil && rp.err != nil { + rp.err = errors.Wrapf(rp.err, "failed to capture provenance: %v", err) + v.Release(context.TODO()) + rp.v = nil + } + rp.provenance = capture + } rp.mu.Unlock() return v, err }) diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go index 732e674741..974c2e04e8 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go @@ -2,7 +2,6 @@ package file import ( "context" - "io/ioutil" "log" "os" "path/filepath" @@ -110,7 +109,7 @@ func mkfile(ctx context.Context, d string, action 
pb.FileActionMkFile, user *cop return err } - if err := ioutil.WriteFile(p, action.Data, os.FileMode(action.Mode)&0777); err != nil { + if err := os.WriteFile(p, action.Data, os.FileMode(action.Mode)&0777); err != nil { return err } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go index e1c58c1e54..b9f3b2ea3c 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go @@ -11,12 +11,13 @@ import ( "github.com/pkg/errors" ) -func NewRefManager(cm cache.Manager) *RefManager { - return &RefManager{cm: cm} +func NewRefManager(cm cache.Manager, name string) *RefManager { + return &RefManager{cm: cm, desc: name} } type RefManager struct { - cm cache.Manager + cm cache.Manager + desc string } func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool, g session.Group) (_ fileoptypes.Mount, rerr error) { @@ -33,7 +34,13 @@ func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly return &Mount{m: m, readonly: readonly}, nil } - mr, err := rm.cm.New(ctx, ir, g, cache.WithDescription("fileop target"), cache.CachePolicyRetain) + desc := "fileop target" + + if d := rm.desc; d != "" { + desc = d + } + + mr, err := rm.cm.New(ctx, ir, g, cache.WithDescription(desc), cache.CachePolicyRetain) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/history.go b/vendor/github.com/moby/buildkit/solver/llbsolver/history.go new file mode 100644 index 0000000000..c8310cc48e --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/history.go @@ -0,0 +1,675 @@ +package llbsolver + +import ( + "bufio" + "context" + "encoding/binary" + "io" + "os" + "sort" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/leases" + controlapi "github.com/moby/buildkit/api/services/control" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/cmd/buildkitd/config" + "github.com/moby/buildkit/util/leaseutil" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + bolt "go.etcd.io/bbolt" +) + +const ( + recordsBucket = "_records" +) + +type HistoryQueueOpt struct { + DB *bolt.DB + LeaseManager leases.Manager + ContentStore content.Store + CleanConfig *config.HistoryConfig +} + +type HistoryQueue struct { + mu sync.Mutex + initOnce sync.Once + HistoryQueueOpt + ps *pubsub[*controlapi.BuildHistoryEvent] + active map[string]*controlapi.BuildHistoryRecord + refs map[string]int + deleted map[string]struct{} +} + +type StatusImportResult struct { + Descriptor ocispecs.Descriptor + NumCachedSteps int + NumCompletedSteps int + NumTotalSteps int +} + +func NewHistoryQueue(opt HistoryQueueOpt) *HistoryQueue { + if opt.CleanConfig == nil { + opt.CleanConfig = &config.HistoryConfig{ + MaxAge: int64((48 * time.Hour).Seconds()), + MaxEntries: 50, + } + } + h := &HistoryQueue{ + HistoryQueueOpt: opt, + ps: &pubsub[*controlapi.BuildHistoryEvent]{ + m: map[*channel[*controlapi.BuildHistoryEvent]]struct{}{}, + }, + active: map[string]*controlapi.BuildHistoryRecord{}, + refs: map[string]int{}, + deleted: map[string]struct{}{}, + } + + go func() { + for { + h.gc() + time.Sleep(120 * time.Second) + } + }() + + return h +} + +func (h *HistoryQueue) gc() error { + var records 
[]*controlapi.BuildHistoryRecord + + if err := h.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return nil + } + return b.ForEach(func(key, dt []byte) error { + var br controlapi.BuildHistoryRecord + if err := br.Unmarshal(dt); err != nil { + return errors.Wrapf(err, "failed to unmarshal build record %s", key) + } + if br.Pinned { + return nil + } + records = append(records, &br) + return nil + }) + }); err != nil { + return err + } + + // in order for a record to get deleted by gc it must exceed both maxentries and maxage criteria + + if len(records) < int(h.CleanConfig.MaxEntries) { + return nil + } + + sort.Slice(records, func(i, j int) bool { + return records[i].CompletedAt.Before(*records[j].CompletedAt) + }) + + h.mu.Lock() + defer h.mu.Unlock() + + now := time.Now() + for _, r := range records[h.CleanConfig.MaxEntries:] { + if now.Add(time.Duration(h.CleanConfig.MaxAge) * -time.Second).After(*r.CompletedAt) { + if err := h.delete(r.Ref, false); err != nil { + return err + } + } + } + + return nil +} + +func (h *HistoryQueue) delete(ref string, sync bool) error { + if _, ok := h.refs[ref]; ok { + h.deleted[ref] = struct{}{} + return nil + } + delete(h.deleted, ref) + if err := h.DB.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return os.ErrNotExist + } + err1 := b.Delete([]byte(ref)) + var opts []leases.DeleteOpt + if sync { + opts = append(opts, leases.SynchronousDelete) + } + err2 := h.LeaseManager.Delete(context.TODO(), leases.Lease{ID: h.leaseID(ref)}, opts...) + if err1 != nil { + return err1 + } + return err2 + }); err != nil { + return err + } + return nil +} + +func (h *HistoryQueue) init() error { + var err error + h.initOnce.Do(func() { + err = h.DB.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucketIfNotExists([]byte(recordsBucket)) + return err + }) + }) + return err +} + +func (h *HistoryQueue) leaseID(id string) string { + return "ref_" + id +} + +func (h *HistoryQueue) addResource(ctx context.Context, l leases.Lease, desc *controlapi.Descriptor) error { + if desc == nil { + return nil + } + return h.LeaseManager.AddResource(ctx, l, leases.Resource{ + ID: string(desc.Digest), + Type: "content", + }) +} + +func (h *HistoryQueue) UpdateRef(ctx context.Context, ref string, upt func(r *controlapi.BuildHistoryRecord) error) error { + h.mu.Lock() + defer h.mu.Unlock() + + var br controlapi.BuildHistoryRecord + if err := h.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return os.ErrNotExist + } + dt := b.Get([]byte(ref)) + if dt == nil { + return os.ErrNotExist + } + + if err := br.Unmarshal(dt); err != nil { + return errors.Wrapf(err, "failed to unmarshal build record %s", ref) + } + return nil + }); err != nil { + return err + } + + if err := upt(&br); err != nil { + return err + } + br.Generation++ + + if br.Ref != ref { + return errors.Errorf("invalid ref change") + } + + if err := h.update(ctx, br); err != nil { + return err + } + h.ps.Send(&controlapi.BuildHistoryEvent{ + Type: controlapi.BuildHistoryEventType_COMPLETE, + Record: &br, + }) + return nil +} + +func (h *HistoryQueue) Status(ctx context.Context, ref string, st chan<- *client.SolveStatus) error { + h.init() + var br controlapi.BuildHistoryRecord + if err := h.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return os.ErrNotExist + } + dt := b.Get([]byte(ref)) + if dt == nil { + return os.ErrNotExist + } + + if err := 
br.Unmarshal(dt); err != nil { + return errors.Wrapf(err, "failed to unmarshal build record %s", ref) + } + return nil + }); err != nil { + return err + } + + if br.Logs == nil { + return nil + } + + ra, err := h.ContentStore.ReaderAt(ctx, ocispecs.Descriptor{ + Digest: br.Logs.Digest, + Size: br.Logs.Size_, + MediaType: br.Logs.MediaType, + }) + if err != nil { + return err + } + defer ra.Close() + + brdr := bufio.NewReader(&reader{ReaderAt: ra}) + + buf := make([]byte, 32*1024) + + for { + _, err := io.ReadAtLeast(brdr, buf[:4], 4) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return err + } + sz := binary.LittleEndian.Uint32(buf[:4]) + if sz > uint32(len(buf)) { + buf = make([]byte, sz) + } + _, err = io.ReadAtLeast(brdr, buf[:sz], int(sz)) + if err != nil { + return err + } + var sr controlapi.StatusResponse + if err := sr.Unmarshal(buf[:sz]); err != nil { + return err + } + st <- client.NewSolveStatus(&sr) + } + + return nil +} + +func (h *HistoryQueue) update(ctx context.Context, rec controlapi.BuildHistoryRecord) error { + return h.DB.Update(func(tx *bolt.Tx) (err error) { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return nil + } + dt, err := rec.Marshal() + if err != nil { + return err + } + + l, err := h.LeaseManager.Create(ctx, leases.WithID(h.leaseID(rec.Ref))) + created := true + if err != nil { + if !errors.Is(err, errdefs.ErrAlreadyExists) { + return err + } + l = leases.Lease{ID: h.leaseID(rec.Ref)} + created = false + } + + defer func() { + if err != nil && created { + h.LeaseManager.Delete(ctx, l) + } + }() + + if err := h.addResource(ctx, l, rec.Logs); err != nil { + return err + } + if err := h.addResource(ctx, l, rec.Trace); err != nil { + return err + } + if rec.Result != nil { + if err := h.addResource(ctx, l, rec.Result.Result); err != nil { + return err + } + for _, att := range rec.Result.Attestations { + if err := h.addResource(ctx, l, att); err != nil { + return err + } + } + } + for _, r := range rec.Results { + if err := h.addResource(ctx, l, r.Result); err != nil { + return err + } + for _, att := range r.Attestations { + if err := h.addResource(ctx, l, att); err != nil { + return err + } + } + } + + return b.Put([]byte(rec.Ref), dt) + }) +} + +func (h *HistoryQueue) Update(ctx context.Context, e *controlapi.BuildHistoryEvent) error { + h.init() + h.mu.Lock() + defer h.mu.Unlock() + + if e.Type == controlapi.BuildHistoryEventType_STARTED { + h.active[e.Record.Ref] = e.Record + h.ps.Send(e) + } + + if e.Type == controlapi.BuildHistoryEventType_COMPLETE { + delete(h.active, e.Record.Ref) + if err := h.update(ctx, *e.Record); err != nil { + return err + } + h.ps.Send(e) + } + return nil +} + +func (h *HistoryQueue) Delete(ctx context.Context, ref string) error { + h.mu.Lock() + defer h.mu.Unlock() + + return h.delete(ref, true) +} + +func (h *HistoryQueue) OpenBlobWriter(ctx context.Context, mt string) (_ *Writer, err error) { + l, err := h.LeaseManager.Create(ctx, leases.WithRandomID(), leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + h.LeaseManager.Delete(ctx, l) + } + }() + + ctx = leases.WithLease(ctx, l.ID) + + w, err := content.OpenWriter(ctx, h.ContentStore, content.WithRef("history-"+h.leaseID(l.ID))) + if err != nil { + return nil, err + } + + return &Writer{ + mt: mt, + lm: h.LeaseManager, + l: l, + w: w, + dgstr: digest.Canonical.Digester(), + }, nil +} + +type Writer struct { + mt string + w content.Writer + lm leases.Manager + l 
leases.Lease + + dgstr digest.Digester + sz int +} + +func (w *Writer) Write(p []byte) (int, error) { + if _, err := w.dgstr.Hash().Write(p); err != nil { + return 0, err + } + w.sz += len(p) + return w.w.Write(p) +} + +func (w *Writer) Discard() { + w.w.Close() + w.lm.Delete(context.TODO(), w.l) +} + +func (w *Writer) Commit(ctx context.Context) (*ocispecs.Descriptor, func(), error) { + dgst := w.dgstr.Digest() + sz := int64(w.sz) + if err := w.w.Commit(ctx, int64(w.sz), dgst); err != nil { + if !errdefs.IsAlreadyExists(err) { + w.Discard() + return nil, nil, err + } + } + return &ocispecs.Descriptor{ + MediaType: w.mt, + Digest: dgst, + Size: sz, + }, + func() { + w.lm.Delete(context.TODO(), w.l) + }, nil +} + +func (h *HistoryQueue) ImportStatus(ctx context.Context, ch chan *client.SolveStatus) (_ *StatusImportResult, _ func(), err error) { + defer func() { + if ch == nil { + return + } + for range ch { + } + }() + + w, err := h.OpenBlobWriter(ctx, "application/vnd.buildkit.status.v0") + if err != nil { + return nil, nil, err + } + + bufW := bufio.NewWriter(w) + + defer func() { + if err != nil { + w.Discard() + } + }() + + type vtxInfo struct { + cached bool + completed bool + } + vtxMap := make(map[digest.Digest]*vtxInfo) + + buf := make([]byte, 32*1024) + for st := range ch { + for _, vtx := range st.Vertexes { + if _, ok := vtxMap[vtx.Digest]; !ok { + vtxMap[vtx.Digest] = &vtxInfo{} + } + if vtx.Cached { + vtxMap[vtx.Digest].cached = true + } + if vtx.Completed != nil { + vtxMap[vtx.Digest].completed = true + } + } + + hdr := make([]byte, 4) + for _, pst := range st.Marshal() { + sz := pst.Size() + if len(buf) < sz { + buf = make([]byte, sz) + } + n, err := pst.MarshalTo(buf) + if err != nil { + return nil, nil, err + } + binary.LittleEndian.PutUint32(hdr, uint32(n)) + if _, err := bufW.Write(hdr); err != nil { + return nil, nil, err + } + if _, err := bufW.Write(buf[:n]); err != nil { + return nil, nil, err + } + } + } + if err := bufW.Flush(); err != nil { + return nil, nil, err + } + desc, release, err := w.Commit(ctx) + if err != nil { + return nil, nil, err + } + + numCached := 0 + numCompleted := 0 + for _, info := range vtxMap { + if info.cached { + numCached++ + } + if info.completed { + numCompleted++ + } + } + + return &StatusImportResult{ + Descriptor: *desc, + NumCachedSteps: numCached, + NumCompletedSteps: numCompleted, + NumTotalSteps: len(vtxMap), + }, release, nil +} + +func (h *HistoryQueue) Listen(ctx context.Context, req *controlapi.BuildHistoryRequest, f func(*controlapi.BuildHistoryEvent) error) error { + h.init() + + h.mu.Lock() + sub := h.ps.Subscribe() + defer sub.close() + + if req.Ref != "" { + if _, ok := h.deleted[req.Ref]; ok { + h.mu.Unlock() + return errors.Wrapf(os.ErrNotExist, "ref %s is deleted", req.Ref) + } + + h.refs[req.Ref]++ + defer func() { + h.mu.Lock() + h.refs[req.Ref]-- + if _, ok := h.deleted[req.Ref]; ok { + if h.refs[req.Ref] == 0 { + delete(h.refs, req.Ref) + h.delete(req.Ref, false) + } + } + h.mu.Unlock() + }() + } + + for _, e := range h.active { + if req.Ref != "" && e.Ref != req.Ref { + continue + } + sub.ps.Send(&controlapi.BuildHistoryEvent{ + Type: controlapi.BuildHistoryEventType_STARTED, + Record: e, + }) + } + + h.mu.Unlock() + + if !req.ActiveOnly { + if err := h.DB.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(recordsBucket)) + if b == nil { + return nil + } + return b.ForEach(func(key, dt []byte) error { + if req.Ref != "" && req.Ref != string(key) { + return nil + } + var br controlapi.BuildHistoryRecord + if 
err := br.Unmarshal(dt); err != nil { + return errors.Wrapf(err, "failed to unmarshal build record %s", key) + } + if err := f(&controlapi.BuildHistoryEvent{ + Record: &br, + Type: controlapi.BuildHistoryEventType_COMPLETE, + }); err != nil { + return err + } + return nil + }) + }); err != nil { + return err + } + } + + if req.EarlyExit { + return nil + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case e := <-sub.ch: + if req.Ref != "" && req.Ref != e.Record.Ref { + continue + } + if err := f(e); err != nil { + return err + } + case <-sub.done: + return nil + } + } +} + +type pubsub[T any] struct { + mu sync.Mutex + m map[*channel[T]]struct{} +} + +func (p *pubsub[T]) Subscribe() *channel[T] { + p.mu.Lock() + c := &channel[T]{ + ps: p, + ch: make(chan T, 32), + done: make(chan struct{}), + } + p.m[c] = struct{}{} + p.mu.Unlock() + return c +} + +func (p *pubsub[T]) Send(v T) { + p.mu.Lock() + for c := range p.m { + go c.send(v) + } + p.mu.Unlock() +} + +type channel[T any] struct { + ps *pubsub[T] + ch chan T + done chan struct{} + closeOnce sync.Once +} + +func (p *channel[T]) send(v T) { + select { + case p.ch <- v: + case <-p.done: + } +} + +func (p *channel[T]) close() { + p.closeOnce.Do(func() { + p.ps.mu.Lock() + delete(p.ps.m, p) + p.ps.mu.Unlock() + close(p.done) + }) +} + +type reader struct { + io.ReaderAt + pos int64 +} + +func (r *reader) Read(p []byte) (int, error) { + n, err := r.ReaderAt.ReadAt(p, r.pos) + r.pos += int64(len(p)) + return n, err +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go b/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go index ffa4df5da3..37bc8a602d 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go @@ -3,7 +3,6 @@ package mounts import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "sync" @@ -259,7 +258,7 @@ func (mm *MountManager) getSecretMountable(ctx context.Context, m *pb.Mount, g s } return nil }) - if err != nil || dt == nil { + if err != nil { return nil, err } return &secretMount{mount: m, data: dt, idmap: mm.cm.IdentityMapping()}, nil @@ -282,7 +281,7 @@ type secretMountInstance struct { } func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) { - dir, err := ioutil.TempDir("", "buildkit-secrets") + dir, err := os.MkdirTemp("", "buildkit-secrets") if err != nil { return nil, nil, errors.Wrap(err, "failed to create temp dir") } @@ -320,7 +319,7 @@ func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) { randID := identity.NewID() fp := filepath.Join(dir, randID) - if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil { + if err := os.WriteFile(fp, sm.sm.data, 0600); err != nil { cleanup() return nil, nil, err } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go index 39d2a77075..fd47df3ae3 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go @@ -11,7 +11,7 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" @@ -20,24 +20,26 @@ import ( const buildCacheType = 
"buildkit.build.v0" -type buildOp struct { +type BuildOp struct { op *pb.BuildOp b frontend.FrontendLLBBridge v solver.Vertex } -func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { +var _ solver.Op = &BuildOp{} + +func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) (*BuildOp, error) { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } - return &buildOp{ + return &BuildOp{ op: op.Build, b: b, v: v, }, nil } -func (b *buildOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { +func (b *BuildOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { dt, err := json.Marshal(struct { Type string Exec *pb.BuildOp @@ -59,7 +61,7 @@ func (b *buildOp) CacheMap(ctx context.Context, g session.Group, index int) (*so }, true, nil } -func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (outputs []solver.Result, retErr error) { +func (b *BuildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (outputs []solver.Result, retErr error) { if b.op.Builder != pb.LLBBuilder { return nil, errors.Errorf("only LLB builder is currently allowed") } @@ -130,9 +132,12 @@ func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Res return nil, err } - for _, r := range newRes.Refs { - r.Release(context.TODO()) - } + newRes.EachRef(func(ref solver.ResultProxy) error { + if ref == newRes.Ref { + return nil + } + return ref.Release(context.TODO()) + }) r, err := newRes.Ref.Result(ctx) if err != nil { @@ -142,7 +147,9 @@ func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Res return []solver.Result{r}, err } -func (b *buildOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { +func (b *BuildOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { // buildOp itself does not count towards parallelism budget. 
return func() {}, nil } + +func (b *BuildOp) IsProvenanceProvider() {} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/diff.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/diff.go index 1a05f7a6c7..338a8748e8 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/diff.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/diff.go @@ -4,15 +4,13 @@ import ( "context" "encoding/json" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/worker" "github.com/pkg/errors" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" ) @@ -23,11 +21,10 @@ type diffOp struct { op *pb.DiffOp worker worker.Worker vtx solver.Vertex - pg progress.Controller } func NewDiffOp(v solver.Vertex, op *pb.Op_Diff, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } return &diffOp{ @@ -64,17 +61,8 @@ func (d *diffOp) CacheMap(ctx context.Context, group session.Group, index int) ( ComputeDigestFunc solver.ResultBasedCacheFunc PreprocessFunc solver.PreprocessFunc }, depCount), - Opts: solver.CacheOpts(make(map[interface{}]interface{})), } - d.pg = &controller.Controller{ - WriterFactory: progress.FromContext(ctx), - Digest: d.vtx.Digest(), - Name: d.vtx.Name(), - ProgressGroup: d.vtx.Options().ProgressGroup, - } - cm.Opts[cache.ProgressKey{}] = d.pg - return cm, true, nil } @@ -121,7 +109,7 @@ func (d *diffOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu return []solver.Result{worker.NewWorkerRefResult(nil, d.worker)}, nil } - diffRef, err := d.worker.CacheManager().Diff(ctx, lowerRef, upperRef, d.pg, + diffRef, err := d.worker.CacheManager().Diff(ctx, lowerRef, upperRef, solver.ProgressControllerFromContext(ctx), cache.WithDescription(d.vtx.Name())) if err != nil { return nil, err diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go index 6cca733c0b..2bee1283b4 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go @@ -17,12 +17,10 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/secrets" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" "github.com/moby/buildkit/solver/llbsolver/errdefs" "github.com/moby/buildkit/solver/llbsolver/mounts" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/util/progress/logs" utilsystem "github.com/moby/buildkit/util/system" "github.com/moby/buildkit/worker" @@ -35,7 +33,7 @@ import ( const execCacheType = "buildkit.exec.v0" -type execOp struct { +type ExecOp struct { op *pb.ExecOp cm cache.Manager mm *mounts.MountManager @@ -45,15 +43,16 @@ type execOp struct { platform *pb.Platform numInputs int parallelism *semaphore.Weighted - vtx solver.Vertex } -func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, parallelism *semaphore.Weighted, sm *session.Manager, exec 
executor.Executor, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { +var _ solver.Op = &ExecOp{} + +func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, parallelism *semaphore.Weighted, sm *session.Manager, exec executor.Executor, w worker.Worker) (*ExecOp, error) { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } name := fmt.Sprintf("exec %s", strings.Join(op.Exec.Meta.Args, " ")) - return &execOp{ + return &ExecOp{ op: op.Exec, mm: mounts.NewMountManager(name, cm, sm), cm: cm, @@ -63,10 +62,13 @@ func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache. w: w, platform: platform, parallelism: parallelism, - vtx: v, }, nil } +func (e *ExecOp) Proto() *pb.ExecOp { + return e.op +} + func cloneExecOp(old *pb.ExecOp) pb.ExecOp { n := *old meta := *n.Meta @@ -84,7 +86,7 @@ func cloneExecOp(old *pb.ExecOp) pb.ExecOp { return n } -func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { +func (e *ExecOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { op := cloneExecOp(e.op) for i := range op.Meta.ExtraHosts { h := op.Meta.ExtraHosts[i] @@ -145,14 +147,6 @@ func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol ComputeDigestFunc solver.ResultBasedCacheFunc PreprocessFunc solver.PreprocessFunc }, e.numInputs), - Opts: solver.CacheOpts(map[interface{}]interface{}{ - cache.ProgressKey{}: &controller.Controller{ - WriterFactory: progress.FromContext(ctx), - Digest: e.vtx.Digest(), - Name: e.vtx.Name(), - ProgressGroup: e.vtx.Options().ProgressGroup, - }, - }), } deps, err := e.getMountDeps() @@ -169,9 +163,9 @@ func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol cm.Deps[i].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0})) } if !dep.NoContentBasedHash { - cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors))) + cm.Deps[i].ComputeDigestFunc = opsutils.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors))) } - cm.Deps[i].PreprocessFunc = llbsolver.UnlazyResultFunc + cm.Deps[i].PreprocessFunc = unlazyResultFunc } return cm, true, nil @@ -201,10 +195,10 @@ func dedupePaths(inp []string) []string { return paths } -func toSelectors(p []string) []llbsolver.Selector { - sel := make([]llbsolver.Selector, 0, len(p)) +func toSelectors(p []string) []opsutils.Selector { + sel := make([]opsutils.Selector, 0, len(p)) for _, p := range p { - sel = append(sel, llbsolver.Selector{Path: p, FollowLinks: true}) + sel = append(sel, opsutils.Selector{Path: p, FollowLinks: true}) } return sel } @@ -214,7 +208,7 @@ type dep struct { NoContentBasedHash bool } -func (e *execOp) getMountDeps() ([]dep, error) { +func (e *ExecOp) getMountDeps() ([]dep, error) { deps := make([]dep, e.numInputs) for _, m := range e.op.Mounts { if m.Input == pb.Empty { @@ -246,7 +240,7 @@ func addDefaultEnvvar(env []string, k, v string) []string { return append(env, k+"="+v) } -func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (results []solver.Result, err error) { +func (e *ExecOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (results []solver.Result, err error) { trace.SpanFromContext(ctx).AddEvent("ExecOp started") refs := make([]*worker.WorkerRef, len(inputs)) @@ -325,17 +319,18 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs 
[]solver.Resu } meta := executor.Meta{ - Args: e.op.Meta.Args, - Env: e.op.Meta.Env, - Cwd: e.op.Meta.Cwd, - User: e.op.Meta.User, - Hostname: e.op.Meta.Hostname, - ReadonlyRootFS: p.ReadonlyRootFS, - ExtraHosts: extraHosts, - Ulimit: e.op.Meta.Ulimit, - CgroupParent: e.op.Meta.CgroupParent, - NetMode: e.op.Network, - SecurityMode: e.op.Security, + Args: e.op.Meta.Args, + Env: e.op.Meta.Env, + Cwd: e.op.Meta.Cwd, + User: e.op.Meta.User, + Hostname: e.op.Meta.Hostname, + ReadonlyRootFS: p.ReadonlyRootFS, + ExtraHosts: extraHosts, + Ulimit: e.op.Meta.Ulimit, + CgroupParent: e.op.Meta.CgroupParent, + NetMode: e.op.Network, + SecurityMode: e.op.Security, + RemoveMountStubsRecursive: e.op.Meta.RemoveMountStubsRecursive, } if e.op.Meta.ProxyEnv != nil { @@ -405,7 +400,7 @@ func proxyEnvList(p *pb.ProxyEnv) []string { return out } -func (e *execOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { +func (e *ExecOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { if e.parallelism == nil { return func() {}, nil } @@ -418,7 +413,7 @@ func (e *execOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { }, nil } -func (e *execOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, error) { +func (e *ExecOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, error) { secretenv := e.op.Secretenv if len(secretenv) == 0 { return nil, nil @@ -448,3 +443,6 @@ func (e *execOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, } return out, nil } + +func (e *ExecOp) IsProvenanceProvider() { +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go index 56433d49fd..c2c5504cc3 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go @@ -2,7 +2,6 @@ package ops import ( "context" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -28,6 +27,7 @@ var qemuArchMap = map[string]string{ "riscv64": "riscv64", "arm": "arm", "s390x": "s390x", + "ppc64": "ppc64", "ppc64le": "ppc64le", "386": "i386", } @@ -47,7 +47,7 @@ type staticEmulatorMount struct { } func (m *staticEmulatorMount) Mount() ([]mount.Mount, func() error, error) { - tmpdir, err := ioutil.TempDir("", "buildkit-qemu-emulator") + tmpdir, err := os.MkdirTemp("", "buildkit-qemu-emulator") if err != nil { return nil, nil, err } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go index 012ef4cc12..7bbb327679 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go @@ -13,14 +13,12 @@ import ( "github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" "github.com/moby/buildkit/solver/llbsolver/errdefs" "github.com/moby/buildkit/solver/llbsolver/file" "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/flightcontrol" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -37,11 +35,10 @@ type fileOp struct { solver *FileOpSolver numInputs int parallelism *semaphore.Weighted - vtx 
solver.Vertex } func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, parallelism *semaphore.Weighted, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } return &fileOp{ @@ -49,14 +46,13 @@ func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, parallelism *s md: cm, numInputs: len(v.Inputs()), w: w, - solver: NewFileOpSolver(w, &file.Backend{}, file.NewRefManager(cm)), + solver: NewFileOpSolver(w, &file.Backend{}, file.NewRefManager(cm, v.Name())), parallelism: parallelism, - vtx: v, }, nil } func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { - selectors := map[int][]llbsolver.Selector{} + selectors := map[int][]opsutils.Selector{} invalidSelectors := map[int]struct{}{} actions := make([][]byte, 0, len(f.op.Actions)) @@ -138,14 +134,6 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol ComputeDigestFunc solver.ResultBasedCacheFunc PreprocessFunc solver.PreprocessFunc }, f.numInputs), - Opts: solver.CacheOpts(map[interface{}]interface{}{ - cache.ProgressKey{}: &controller.Controller{ - WriterFactory: progress.FromContext(ctx), - Digest: f.vtx.Digest(), - Name: f.vtx.Name(), - ProgressGroup: f.vtx.Options().ProgressGroup, - }, - }), } for idx, m := range selectors { @@ -161,10 +149,10 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol }) cm.Deps[idx].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0})) - cm.Deps[idx].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupeSelectors(m)) + cm.Deps[idx].ComputeDigestFunc = opsutils.NewContentHashFunc(dedupeSelectors(m)) } for idx := range cm.Deps { - cm.Deps[idx].PreprocessFunc = llbsolver.UnlazyResultFunc + cm.Deps[idx].PreprocessFunc = unlazyResultFunc } return cm, true, nil @@ -206,8 +194,8 @@ func (f *fileOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { }, nil } -func addSelector(m map[int][]llbsolver.Selector, idx int, sel string, wildcard, followLinks bool, includePatterns, excludePatterns []string) { - s := llbsolver.Selector{ +func addSelector(m map[int][]opsutils.Selector, idx int, sel string, wildcard, followLinks bool, includePatterns, excludePatterns []string) { + s := opsutils.Selector{ Path: sel, FollowLinks: followLinks, Wildcard: wildcard && containsWildcards(sel), @@ -231,7 +219,7 @@ func containsWildcards(name string) bool { return false } -func dedupeSelectors(m []llbsolver.Selector) []llbsolver.Selector { +func dedupeSelectors(m []opsutils.Selector) []opsutils.Selector { paths := make([]string, 0, len(m)) pathsFollow := make([]string, 0, len(m)) for _, sel := range m { @@ -245,13 +233,13 @@ func dedupeSelectors(m []llbsolver.Selector) []llbsolver.Selector { } paths = dedupePaths(paths) pathsFollow = dedupePaths(pathsFollow) - selectors := make([]llbsolver.Selector, 0, len(m)) + selectors := make([]opsutils.Selector, 0, len(m)) for _, p := range paths { - selectors = append(selectors, llbsolver.Selector{Path: p}) + selectors = append(selectors, opsutils.Selector{Path: p}) } for _, p := range pathsFollow { - selectors = append(selectors, llbsolver.Selector{Path: p, FollowLinks: true}) + selectors = append(selectors, opsutils.Selector{Path: p, FollowLinks: true}) } for _, sel := range m { @@ -267,7 +255,7 @@ func dedupeSelectors(m []llbsolver.Selector) []llbsolver.Selector { return selectors } -func processOwner(chopt *pb.ChownOpt, 
selectors map[int][]llbsolver.Selector) error { +func processOwner(chopt *pb.ChownOpt, selectors map[int][]opsutils.Selector) error { if chopt == nil { return nil } @@ -677,3 +665,14 @@ func isDefaultIndexes(idxs [][]int) bool { } return true } + +func unlazyResultFunc(ctx context.Context, res solver.Result, g session.Group) error { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return errors.Errorf("invalid reference: %T", res) + } + if ref.ImmutableRef == nil { + return nil + } + return ref.ImmutableRef.Extract(ctx, g) +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/merge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/merge.go index 13bb60ba88..db1b025bff 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/merge.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/merge.go @@ -4,15 +4,13 @@ import ( "context" "encoding/json" - "github.com/moby/buildkit/util/progress" - "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/worker" "github.com/pkg/errors" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" ) @@ -23,11 +21,10 @@ type mergeOp struct { op *pb.MergeOp worker worker.Worker vtx solver.Vertex - pg progress.Controller } func NewMergeOp(v solver.Vertex, op *pb.Op_Merge, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } return &mergeOp{ @@ -56,17 +53,8 @@ func (m *mergeOp) CacheMap(ctx context.Context, group session.Group, index int) ComputeDigestFunc solver.ResultBasedCacheFunc PreprocessFunc solver.PreprocessFunc }, len(m.op.Inputs)), - Opts: solver.CacheOpts(make(map[interface{}]interface{})), } - m.pg = &controller.Controller{ - WriterFactory: progress.FromContext(ctx), - Digest: m.vtx.Digest(), - Name: m.vtx.Name(), - ProgressGroup: m.vtx.Options().ProgressGroup, - } - cm.Opts[cache.ProgressKey{}] = m.pg - return cm, true, nil } @@ -93,7 +81,7 @@ func (m *mergeOp) Exec(ctx context.Context, g session.Group, inputs []solver.Res return nil, nil } - mergedRef, err := m.worker.CacheManager().Merge(ctx, refs, m.pg, + mergedRef, err := m.worker.CacheManager().Merge(ctx, refs, solver.ProgressControllerFromContext(ctx), cache.WithDescription(m.vtx.Name())) if err != nil { return nil, err diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/opsutils/contenthash.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/opsutils/contenthash.go new file mode 100644 index 0000000000..8bdd8f939e --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/opsutils/contenthash.go @@ -0,0 +1,71 @@ +package opsutils + +import ( + "bytes" + "context" + "path" + + "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +type Selector struct { + Path string + Wildcard bool + FollowLinks bool + IncludePatterns []string + ExcludePatterns []string +} + +func (sel Selector) HasWildcardOrFilters() bool { + return sel.Wildcard || len(sel.IncludePatterns) != 0 || len(sel.ExcludePatterns) != 0 +} + +func NewContentHashFunc(selectors 
[]Selector) solver.ResultBasedCacheFunc { + return func(ctx context.Context, res solver.Result, s session.Group) (digest.Digest, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return "", errors.Errorf("invalid reference: %T", res) + } + + if len(selectors) == 0 { + selectors = []Selector{{}} + } + + dgsts := make([][]byte, len(selectors)) + + eg, ctx := errgroup.WithContext(ctx) + + for i, sel := range selectors { + i, sel := i, sel + eg.Go(func() error { + dgst, err := contenthash.Checksum( + ctx, ref.ImmutableRef, path.Join("/", sel.Path), + contenthash.ChecksumOpts{ + Wildcard: sel.Wildcard, + FollowLinks: sel.FollowLinks, + IncludePatterns: sel.IncludePatterns, + ExcludePatterns: sel.ExcludePatterns, + }, + s, + ) + if err != nil { + return errors.Wrapf(err, "failed to calculate checksum of ref %s", ref.ID()) + } + dgsts[i] = []byte(dgst) + return nil + }) + } + + if err := eg.Wait(); err != nil { + return "", err + } + + return digest.FromBytes(bytes.Join(dgsts, []byte{0})), nil + } +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/opsutils/validate.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/opsutils/validate.go new file mode 100644 index 0000000000..8e0d30d9ec --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/opsutils/validate.go @@ -0,0 +1,63 @@ +package opsutils + +import ( + "github.com/moby/buildkit/solver/pb" + "github.com/pkg/errors" +) + +func Validate(op *pb.Op) error { + if op == nil { + return errors.Errorf("invalid nil op") + } + + switch op := op.Op.(type) { + case *pb.Op_Source: + if op.Source == nil { + return errors.Errorf("invalid nil source op") + } + case *pb.Op_Exec: + if op.Exec == nil { + return errors.Errorf("invalid nil exec op") + } + if op.Exec.Meta == nil { + return errors.Errorf("invalid exec op with no meta") + } + if len(op.Exec.Meta.Args) == 0 { + return errors.Errorf("invalid exec op with no args") + } + if len(op.Exec.Mounts) == 0 { + return errors.Errorf("invalid exec op with no mounts") + } + + isRoot := false + for _, m := range op.Exec.Mounts { + if m.Dest == pb.RootMount { + isRoot = true + break + } + } + if !isRoot { + return errors.Errorf("invalid exec op with no rootfs") + } + case *pb.Op_File: + if op.File == nil { + return errors.Errorf("invalid nil file op") + } + if len(op.File.Actions) == 0 { + return errors.Errorf("invalid file op with no actions") + } + case *pb.Op_Build: + if op.Build == nil { + return errors.Errorf("invalid nil build op") + } + case *pb.Op_Merge: + if op.Merge == nil { + return errors.Errorf("invalid nil merge op") + } + case *pb.Op_Diff: + if op.Diff == nil { + return errors.Errorf("invalid nil diff op") + } + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go index d24a902da5..fabd300d4b 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go @@ -7,7 +7,7 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/source" "github.com/moby/buildkit/worker" @@ -17,7 +17,7 @@ import ( const sourceCacheType = "buildkit.source.v0" -type sourceOp struct { +type SourceOp struct { mu sync.Mutex op *pb.Op_Source platform *pb.Platform @@ -27,13 +27,17 @@ type sourceOp 
struct { w worker.Worker vtx solver.Vertex parallelism *semaphore.Weighted + pin string + id source.Identifier } -func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, parallelism *semaphore.Weighted, sessM *session.Manager, w worker.Worker) (solver.Op, error) { - if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { +var _ solver.Op = &SourceOp{} + +func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, parallelism *semaphore.Weighted, sessM *session.Manager, w worker.Worker) (*SourceOp, error) { + if err := opsutils.Validate(&pb.Op{Op: op}); err != nil { return nil, err } - return &sourceOp{ + return &SourceOp{ op: op, sm: sm, w: w, @@ -44,7 +48,13 @@ func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm }, nil } -func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) { +func (s *SourceOp) IsProvenanceProvider() {} + +func (s *SourceOp) Pin() (source.Identifier, string) { + return s.id, s.pin +} + +func (s *SourceOp) instance(ctx context.Context) (source.SourceInstance, error) { s.mu.Lock() defer s.mu.Unlock() if s.src != nil { @@ -59,10 +69,11 @@ func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) return nil, err } s.src = src + s.id = id return s.src, nil } -func (s *sourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { +func (s *SourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*solver.CacheMap, bool, error) { src, err := s.instance(ctx) if err != nil { return nil, false, err @@ -73,25 +84,23 @@ func (s *sourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*s return nil, false, err } + if s.pin == "" { + s.pin = pin + } + dgst := digest.FromBytes([]byte(sourceCacheType + ":" + k)) if strings.HasPrefix(k, "session:") { dgst = digest.Digest("random:" + strings.TrimPrefix(dgst.String(), dgst.Algorithm().String()+":")) } - var buildSources map[string]string - if !strings.HasPrefix(s.op.Source.GetIdentifier(), "local://") { - buildSources = map[string]string{s.op.Source.GetIdentifier(): pin} - } - return &solver.CacheMap{ // TODO: add os/arch - Digest: dgst, - Opts: cacheOpts, - BuildSources: buildSources, + Digest: dgst, + Opts: cacheOpts, }, done, nil } -func (s *sourceOp) Exec(ctx context.Context, g session.Group, _ []solver.Result) (outputs []solver.Result, err error) { +func (s *SourceOp) Exec(ctx context.Context, g session.Group, _ []solver.Result) (outputs []solver.Result, err error) { src, err := s.instance(ctx) if err != nil { return nil, err @@ -103,7 +112,7 @@ func (s *sourceOp) Exec(ctx context.Context, g session.Group, _ []solver.Result) return []solver.Result{worker.NewWorkerRefResult(ref, s.w)}, nil } -func (s *sourceOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { +func (s *SourceOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { if s.parallelism == nil { return func() {}, nil } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go new file mode 100644 index 0000000000..1af3af1960 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/provenance.go @@ -0,0 +1,77 @@ +package proc + +import ( + "context" + "encoding/json" + "strconv" + + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + gatewaypb 
"github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/result" + "github.com/pkg/errors" +) + +func ProvenanceProcessor(attrs map[string]string) llbsolver.Processor { + return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { + ps, err := exptypes.ParsePlatforms(res.Metadata) + if err != nil { + return nil, err + } + + var inlineOnly bool + if v, err := strconv.ParseBool(attrs["inline-only"]); v && err == nil { + inlineOnly = true + } + + for _, p := range ps.Platforms { + cp, ok := res.Provenance.FindRef(p.ID) + if !ok { + return nil, errors.Errorf("no build info found for provenance %s", p.ID) + } + + if cp == nil { + continue + } + + ref, ok := res.FindRef(p.ID) + if !ok { + return nil, errors.Errorf("could not find ref %s", p.ID) + } + + pc, err := llbsolver.NewProvenanceCreator(ctx, cp, ref, attrs, j) + if err != nil { + return nil, err + } + + filename := "provenance.json" + if v, ok := attrs["filename"]; ok { + filename = v + } + + res.AddAttestation(p.ID, llbsolver.Attestation{ + Kind: gatewaypb.AttestationKindInToto, + Metadata: map[string][]byte{ + result.AttestationReasonKey: []byte(result.AttestationReasonProvenance), + result.AttestationInlineOnlyKey: []byte(strconv.FormatBool(inlineOnly)), + }, + InToto: result.InTotoAttestation{ + PredicateType: slsa02.PredicateSLSAProvenance, + }, + Path: filename, + ContentFunc: func() ([]byte, error) { + pr, err := pc.Predicate() + if err != nil { + return nil, err + } + + return json.MarshalIndent(pr, "", " ") + }, + }) + } + + return res, nil + } +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go new file mode 100644 index 0000000000..2d7e969ba5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/proc/sbom.go @@ -0,0 +1,76 @@ +package proc + +import ( + "context" + + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/attestations/sbom" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/result" + "github.com/pkg/errors" +) + +func SBOMProcessor(scannerRef string, useCache bool) llbsolver.Processor { + return func(ctx context.Context, res *llbsolver.Result, s *llbsolver.Solver, j *solver.Job) (*llbsolver.Result, error) { + // skip sbom generation if we already have an sbom + if sbom.HasSBOM(res.Result) { + return res, nil + } + + ps, err := exptypes.ParsePlatforms(res.Metadata) + if err != nil { + return nil, err + } + + scanner, err := sbom.CreateSBOMScanner(ctx, s.Bridge(j), scannerRef) + if err != nil { + return nil, err + } + if scanner == nil { + return res, nil + } + + for _, p := range ps.Platforms { + ref, ok := res.FindRef(p.ID) + if !ok { + return nil, errors.Errorf("could not find ref %s", p.ID) + } + defop, err := llb.NewDefinitionOp(ref.Definition()) + if err != nil { + return nil, err + } + st := llb.NewState(defop) + + var opts []llb.ConstraintsOpt + if !useCache { + opts = append(opts, llb.IgnoreCache) + } + att, err := scanner(ctx, p.ID, st, nil, opts...) 
+ if err != nil { + return nil, err + } + attSolve, err := result.ConvertAttestation(&att, func(st llb.State) (solver.ResultProxy, error) { + def, err := st.Marshal(ctx) + if err != nil { + return nil, err + } + + r, err := s.Bridge(j).Solve(ctx, frontend.SolveRequest{ // TODO: buildinfo + Definition: def.ToPB(), + }, j.SessionID) + if err != nil { + return nil, err + } + return r.Ref, nil + }) + if err != nil { + return nil, err + } + res.AddAttestation(p.ID, *attSolve) + } + return res, nil + } +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go new file mode 100644 index 0000000000..b30581c852 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance.go @@ -0,0 +1,571 @@ +package llbsolver + +import ( + "context" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/platforms" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/exporter/containerimage" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/ops" + "github.com/moby/buildkit/solver/llbsolver/provenance" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type resultWithBridge struct { + res *frontend.Result + bridge *provenanceBridge +} + +// provenanceBridge provides scoped access to LLBBridge and captures the request it makes for provenance +type provenanceBridge struct { + *llbBridge + mu sync.Mutex + req *frontend.SolveRequest + + images []provenance.ImageSource + builds []resultWithBridge + subBridges []*provenanceBridge +} + +func (b *provenanceBridge) eachRef(f func(r solver.ResultProxy) error) error { + for _, b := range b.builds { + if err := b.res.EachRef(f); err != nil { + return err + } + } + for _, b := range b.subBridges { + if err := b.eachRef(f); err != nil { + return err + } + } + return nil +} + +func (b *provenanceBridge) allImages() []provenance.ImageSource { + res := make([]provenance.ImageSource, 0, len(b.images)) + res = append(res, b.images...) + for _, sb := range b.subBridges { + res = append(res, sb.allImages()...) 
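		// Note (annotation, not part of the patch): subBridges mirror nested
		// frontend solves, so this recursion also surfaces images resolved by
		// sub-builds, not only those recorded directly on this bridge via
		// ResolveImageConfig.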
+ } + return res +} + +func (b *provenanceBridge) requests(r *frontend.Result) (*resultRequests, error) { + reqs := &resultRequests{ + refs: make(map[string]*resultWithBridge), + atts: make(map[string][]*resultWithBridge), + } + + if r.Ref != nil { + ref, ok := b.findByResult(r.Ref) + if !ok { + return nil, errors.Errorf("could not find request for ref %s", r.Ref.ID()) + } + reqs.ref = ref + } + + for k, ref := range r.Refs { + r, ok := b.findByResult(ref) + if !ok { + return nil, errors.Errorf("could not find request for ref %s", ref.ID()) + } + reqs.refs[k] = r + } + + for k, atts := range r.Attestations { + for _, att := range atts { + if att.Ref == nil { + continue + } + r, ok := b.findByResult(att.Ref) + if !ok { + return nil, errors.Errorf("could not find request for ref %s", att.Ref.ID()) + } + reqs.atts[k] = append(reqs.atts[k], r) + } + } + + ps, err := exptypes.ParsePlatforms(r.Metadata) + if err != nil { + return nil, err + } + reqs.platforms = ps.Platforms + + return reqs, nil +} + +func (b *provenanceBridge) findByResult(rp solver.ResultProxy) (*resultWithBridge, bool) { + for _, br := range b.subBridges { + if req, ok := br.findByResult(rp); ok { + return req, true + } + } + for _, bld := range b.builds { + found := false + bld.res.EachRef(func(r solver.ResultProxy) error { + if r.ID() == rp.ID() { + found = true + } + return nil + }) + if found { + return &bld, true + } + } + return nil, false +} + +func (b *provenanceBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { + dgst, config, err = b.llbBridge.ResolveImageConfig(ctx, ref, opt) + if err != nil { + return "", nil, err + } + + b.images = append(b.images, provenance.ImageSource{ + Ref: ref, + Platform: opt.Platform, + Digest: dgst, + }) + return dgst, config, nil +} + +func (b *provenanceBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid string) (res *frontend.Result, err error) { + if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" { + return nil, errors.New("cannot solve with both Definition and Frontend specified") + } + + if req.Definition != nil && req.Definition.Def != nil { + rp := newResultProxy(b, req) + res = &frontend.Result{Ref: rp} + b.mu.Lock() + b.builds = append(b.builds, resultWithBridge{res: res, bridge: b}) + b.mu.Unlock() + } else if req.Frontend != "" { + f, ok := b.llbBridge.frontends[req.Frontend] + if !ok { + return nil, errors.Errorf("invalid frontend: %s", req.Frontend) + } + wb := &provenanceBridge{llbBridge: b.llbBridge, req: &req} + res, err = f.Solve(ctx, wb, req.FrontendOpt, req.FrontendInputs, sid, b.llbBridge.sm) + if err != nil { + return nil, err + } + wb.builds = append(wb.builds, resultWithBridge{res: res, bridge: wb}) + b.mu.Lock() + b.subBridges = append(b.subBridges, wb) + b.mu.Unlock() + } else { + return &frontend.Result{}, nil + } + if req.Evaluate { + err = res.EachRef(func(ref solver.ResultProxy) error { + _, err := res.Ref.Result(ctx) + return err + }) + } + return +} + +type resultRequests struct { + ref *resultWithBridge + refs map[string]*resultWithBridge + atts map[string][]*resultWithBridge + platforms []exptypes.Platform +} + +// filterImagePlatforms filter out images that not for the current platform if an image exists for every platform in a result +func (reqs *resultRequests) filterImagePlatforms(k string, imgs []provenance.ImageSource) []provenance.ImageSource { + if len(reqs.platforms) == 0 { + return imgs + } + m := map[string]string{} + 
for _, img := range imgs { + if _, ok := m[img.Ref]; ok { + continue + } + hasPlatform := true + for _, p := range reqs.platforms { + matcher := platforms.NewMatcher(p.Platform) + found := false + for _, img2 := range imgs { + if img.Ref == img2.Ref && img2.Platform != nil { + if matcher.Match(*img2.Platform) { + found = true + break + } + } + } + if !found { + hasPlatform = false + break + } + } + if hasPlatform { + m[img.Ref] = img.Ref + } + } + + var current ocispecs.Platform + for _, p := range reqs.platforms { + if p.ID == k { + current = p.Platform + } + } + + out := make([]provenance.ImageSource, 0, len(imgs)) + for _, img := range imgs { + if _, ok := m[img.Ref]; ok && img.Platform != nil { + if current.OS == img.Platform.OS && current.Architecture == img.Platform.Architecture { + out = append(out, img) + } + } else { + out = append(out, img) + } + } + return out +} + +func (reqs *resultRequests) allRes() map[string]struct{} { + res := make(map[string]struct{}) + if reqs.ref != nil { + res[reqs.ref.res.Ref.ID()] = struct{}{} + } + for _, r := range reqs.refs { + res[r.res.Ref.ID()] = struct{}{} + } + for _, rs := range reqs.atts { + for _, r := range rs { + res[r.res.Ref.ID()] = struct{}{} + } + } + return res +} + +func captureProvenance(ctx context.Context, res solver.CachedResultWithProvenance) (*provenance.Capture, error) { + if res == nil { + return nil, nil + } + c := &provenance.Capture{} + + err := res.WalkProvenance(ctx, func(pp solver.ProvenanceProvider) error { + switch op := pp.(type) { + case *ops.SourceOp: + id, pin := op.Pin() + switch s := id.(type) { + case *source.ImageIdentifier: + dgst, err := digest.Parse(pin) + if err != nil { + return errors.Wrapf(err, "failed to parse image digest %s", pin) + } + c.AddImage(provenance.ImageSource{ + Ref: s.Reference.String(), + Platform: s.Platform, + Digest: dgst, + }) + case *source.LocalIdentifier: + c.AddLocal(provenance.LocalSource{ + Name: s.Name, + }) + case *source.GitIdentifier: + url := s.Remote + if s.Ref != "" { + url += "#" + s.Ref + } + c.AddGit(provenance.GitSource{ + URL: url, + Commit: pin, + }) + if s.AuthTokenSecret != "" { + c.AddSecret(provenance.Secret{ + ID: s.AuthTokenSecret, + Optional: true, + }) + } + if s.AuthHeaderSecret != "" { + c.AddSecret(provenance.Secret{ + ID: s.AuthHeaderSecret, + Optional: true, + }) + } + if s.MountSSHSock != "" { + c.AddSSH(provenance.SSH{ + ID: s.MountSSHSock, + Optional: true, + }) + } + case *source.HTTPIdentifier: + dgst, err := digest.Parse(pin) + if err != nil { + return errors.Wrapf(err, "failed to parse HTTP digest %s", pin) + } + c.AddHTTP(provenance.HTTPSource{ + URL: s.URL, + Digest: dgst, + }) + case *source.OCIIdentifier: + dgst, err := digest.Parse(pin) + if err != nil { + return errors.Wrapf(err, "failed to parse OCI digest %s", pin) + } + c.AddLocalImage(provenance.ImageSource{ + Ref: s.Reference.String(), + Platform: s.Platform, + Digest: dgst, + }) + default: + return errors.Errorf("unknown source identifier %T", id) + } + case *ops.ExecOp: + pr := op.Proto() + for _, m := range pr.Mounts { + if m.MountType == pb.MountType_SECRET { + c.AddSecret(provenance.Secret{ + ID: m.SecretOpt.GetID(), + Optional: m.SecretOpt.GetOptional(), + }) + } + if m.MountType == pb.MountType_SSH { + c.AddSSH(provenance.SSH{ + ID: m.SSHOpt.GetID(), + Optional: m.SSHOpt.GetOptional(), + }) + } + } + for _, se := range pr.Secretenv { + c.AddSecret(provenance.Secret{ + ID: se.GetID(), + Optional: se.GetOptional(), + }) + } + if pr.Network != pb.NetMode_NONE { + c.NetworkAccess 
= true + } + case *ops.BuildOp: + c.IncompleteMaterials = true // not supported yet + } + return nil + }) + if err != nil { + return nil, err + } + return c, err +} + +type ProvenanceCreator struct { + pr *provenance.ProvenancePredicate + j *solver.Job + addLayers func() error +} + +func NewProvenanceCreator(ctx context.Context, cp *provenance.Capture, res solver.ResultProxy, attrs map[string]string, j *solver.Job) (*ProvenanceCreator, error) { + var reproducible bool + if v, ok := attrs["reproducible"]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse reproducible flag %q", v) + } + reproducible = b + } + + mode := "max" + if v, ok := attrs["mode"]; ok { + switch v { + case "full": + mode = "max" + case "max", "min": + mode = v + default: + return nil, errors.Errorf("invalid mode %q", v) + } + } + + pr, err := provenance.NewPredicate(cp) + if err != nil { + return nil, err + } + + st := j.StartedTime() + + pr.Metadata.BuildStartedOn = &st + pr.Metadata.Reproducible = reproducible + pr.Metadata.BuildInvocationID = j.UniqueID() + + pr.Builder.ID = attrs["builder-id"] + + var addLayers func() error + + switch mode { + case "min": + args := make(map[string]string) + for k, v := range pr.Invocation.Parameters.Args { + if strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") { + pr.Metadata.Completeness.Parameters = false + continue + } + args[k] = v + } + pr.Invocation.Parameters.Args = args + pr.Invocation.Parameters.Secrets = nil + pr.Invocation.Parameters.SSH = nil + case "max": + dgsts, err := provenance.AddBuildConfig(ctx, pr, res) + if err != nil { + return nil, err + } + + r, err := res.Result(ctx) + if err != nil { + return nil, err + } + + wref, ok := r.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid worker ref %T", r.Sys()) + } + + addLayers = func() error { + e := newCacheExporter() + + if wref.ImmutableRef != nil { + ctx = withDescHandlerCacheOpts(ctx, wref.ImmutableRef) + } + + if _, err := r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ + ResolveRemotes: resolveRemotes, + Mode: solver.CacheExportModeRemoteOnly, + ExportRoots: true, + }); err != nil { + return err + } + + m := map[string][][]ocispecs.Descriptor{} + + for l, descs := range e.layers { + idx, ok := dgsts[l.digest] + if !ok { + continue + } + + m[fmt.Sprintf("step%d:%d", idx, l.index)] = descs + } + + if len(m) != 0 { + if pr.Metadata == nil { + pr.Metadata = &provenance.ProvenanceMetadata{} + } + + pr.Metadata.BuildKitMetadata.Layers = m + } + + return nil + } + default: + return nil, errors.Errorf("invalid mode %q", mode) + } + + return &ProvenanceCreator{ + pr: pr, + j: j, + addLayers: addLayers, + }, nil +} + +func (p *ProvenanceCreator) Predicate() (*provenance.ProvenancePredicate, error) { + end := p.j.RegisterCompleteTime() + p.pr.Metadata.BuildFinishedOn = &end + + if p.addLayers != nil { + if err := p.addLayers(); err != nil { + return nil, err + } + } + + return p.pr, nil +} + +type edge struct { + digest digest.Digest + index int +} + +func newCacheExporter() *cacheExporter { + return &cacheExporter{ + m: map[interface{}]struct{}{}, + layers: map[edge][][]ocispecs.Descriptor{}, + } +} + +type cacheExporter struct { + layers map[edge][][]ocispecs.Descriptor + m map[interface{}]struct{} +} + +func (ce *cacheExporter) Add(dgst digest.Digest) solver.CacheExporterRecord { + return &cacheRecord{ + ce: ce, + } +} + +func (ce *cacheExporter) Visit(v interface{}) { + ce.m[v] = struct{}{} +} + +func (ce 
*cacheExporter) Visited(v interface{}) bool { + _, ok := ce.m[v] + return ok +} + +type cacheRecord struct { + ce *cacheExporter +} + +func (c *cacheRecord) AddResult(dgst digest.Digest, idx int, createdAt time.Time, result *solver.Remote) { + if result == nil || dgst == "" { + return + } + e := edge{ + digest: dgst, + index: idx, + } + descs := make([]ocispecs.Descriptor, len(result.Descriptors)) + for i, desc := range result.Descriptors { + d := desc + d.Annotations = containerimage.RemoveInternalLayerAnnotations(d.Annotations, true) + descs[i] = d + } + c.ce.layers[e] = append(c.ce.layers[e], descs) +} + +func (c *cacheRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) { +} + +func resolveRemotes(ctx context.Context, res solver.Result) ([]*solver.Remote, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid result: %T", res.Sys()) + } + + remotes, err := ref.GetRemotes(ctx, false, config.RefConfig{}, true, nil) + if err != nil { + if errors.Is(err, cache.ErrNoBlobs) { + return nil, nil + } + return nil, err + } + return remotes, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go new file mode 100644 index 0000000000..4d9bf85ec1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/buildconfig.go @@ -0,0 +1,187 @@ +package provenance + +import ( + "context" + "fmt" + + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type BuildConfig struct { + Definition []BuildStep `json:"llbDefinition,omitempty"` + DigestMapping map[digest.Digest]string `json:"digestMapping,omitempty"` +} + +type BuildStep struct { + ID string `json:"id,omitempty"` + Op interface{} `json:"op,omitempty"` + Inputs []string `json:"inputs,omitempty"` +} + +type Source struct { + Locations map[string]*pb.Locations `json:"locations,omitempty"` + Infos []SourceInfo `json:"infos,omitempty"` +} + +type SourceInfo struct { + Filename string `json:"filename,omitempty"` + Data []byte `json:"data,omitempty"` + Definition []BuildStep `json:"llbDefinition,omitempty"` + DigestMapping map[digest.Digest]string `json:"digestMapping,omitempty"` +} + +func digestMap(idx map[digest.Digest]int) map[digest.Digest]string { + m := map[digest.Digest]string{} + for k, v := range idx { + m[k] = fmt.Sprintf("step%d", v) + } + return m +} + +func AddBuildConfig(ctx context.Context, p *ProvenancePredicate, rp solver.ResultProxy) (map[digest.Digest]int, error) { + def := rp.Definition() + steps, indexes, err := toBuildSteps(def) + if err != nil { + return nil, err + } + + bc := &BuildConfig{ + Definition: steps, + DigestMapping: digestMap(indexes), + } + + p.BuildConfig = bc + + if def.Source != nil { + sis := make([]SourceInfo, len(def.Source.Infos)) + for i, si := range def.Source.Infos { + steps, indexes, err := toBuildSteps(si.Definition) + if err != nil { + return nil, err + } + s := SourceInfo{ + Filename: si.Filename, + Data: si.Data, + Definition: steps, + DigestMapping: digestMap(indexes), + } + sis[i] = s + } + + if len(def.Source.Infos) != 0 { + locs := map[string]*pb.Locations{} + for k, l := range def.Source.Locations { + idx, ok := indexes[digest.Digest(k)] + if !ok { + continue + } + locs[fmt.Sprintf("step%d", idx)] = l + } + + if p.Metadata == nil { + p.Metadata = &ProvenanceMetadata{} + } + 
p.Metadata.BuildKitMetadata.Source = &Source{ + Infos: sis, + Locations: locs, + } + } + } + + return indexes, nil +} + +func toBuildSteps(def *pb.Definition) ([]BuildStep, map[digest.Digest]int, error) { + if def == nil || len(def.Def) == 0 { + return nil, nil, nil + } + + ops := make(map[digest.Digest]*pb.Op) + defs := make(map[digest.Digest][]byte) + + var dgst digest.Digest + for _, dt := range def.Def { + var op pb.Op + if err := (&op).Unmarshal(dt); err != nil { + return nil, nil, errors.Wrap(err, "failed to parse llb proto op") + } + if src := op.GetSource(); src != nil { + for k := range src.Attrs { + if k == "local.session" || k == "local.unique" { + delete(src.Attrs, k) + } + } + } + dgst = digest.FromBytes(dt) + ops[dgst] = &op + defs[dgst] = dt + } + + if dgst == "" { + return nil, nil, nil + } + + // depth first backwards + dgsts := make([]digest.Digest, 0, len(def.Def)) + op := ops[dgst] + + if op.Op != nil { + return nil, nil, errors.Errorf("invalid last vertex: %T", op.Op) + } + + if len(op.Inputs) != 1 { + return nil, nil, errors.Errorf("invalid last vertex inputs: %v", len(op.Inputs)) + } + + visited := map[digest.Digest]struct{}{} + dgsts, err := walkDigests(dgsts, ops, dgst, visited) + if err != nil { + return nil, nil, err + } + indexes := map[digest.Digest]int{} + for i, dgst := range dgsts { + indexes[dgst] = i + } + + out := make([]BuildStep, 0, len(dgsts)) + for i, dgst := range dgsts { + op := *ops[dgst] + inputs := make([]string, len(op.Inputs)) + for i, inp := range op.Inputs { + inputs[i] = fmt.Sprintf("step%d:%d", indexes[inp.Digest], inp.Index) + } + op.Inputs = nil + out = append(out, BuildStep{ + ID: fmt.Sprintf("step%d", i), + Inputs: inputs, + Op: op, + }) + } + return out, indexes, nil +} + +func walkDigests(dgsts []digest.Digest, ops map[digest.Digest]*pb.Op, dgst digest.Digest, visited map[digest.Digest]struct{}) ([]digest.Digest, error) { + if _, ok := visited[dgst]; ok { + return dgsts, nil + } + op, ok := ops[dgst] + if !ok { + return nil, errors.Errorf("failed to find input %v", dgst) + } + if op == nil { + return nil, errors.Errorf("invalid nil input %v", dgst) + } + visited[dgst] = struct{}{} + for _, inp := range op.Inputs { + var err error + dgsts, err = walkDigests(dgsts, ops, inp.Digest, visited) + if err != nil { + return nil, err + } + } + dgsts = append(dgsts, dgst) + return dgsts, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go new file mode 100644 index 0000000000..a176e9875f --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/capture.go @@ -0,0 +1,248 @@ +package provenance + +import ( + "sort" + + distreference "github.com/docker/distribution/reference" + "github.com/moby/buildkit/solver/result" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type Result = result.Result[*Capture] + +type ImageSource struct { + Ref string + Platform *ocispecs.Platform + Digest digest.Digest +} + +type GitSource struct { + URL string + Commit string +} + +type HTTPSource struct { + URL string + Digest digest.Digest +} + +type LocalSource struct { + Name string `json:"name"` +} + +type Secret struct { + ID string `json:"id"` + Optional bool `json:"optional,omitempty"` +} + +type SSH struct { + ID string `json:"id"` + Optional bool `json:"optional,omitempty"` +} + +type Sources struct { + Images []ImageSource + LocalImages []ImageSource + Git 
[]GitSource + HTTP []HTTPSource + Local []LocalSource +} + +type Capture struct { + Frontend string + Args map[string]string + Sources Sources + Secrets []Secret + SSH []SSH + NetworkAccess bool + IncompleteMaterials bool +} + +func (c *Capture) Merge(c2 *Capture) error { + if c2 == nil { + return nil + } + for _, i := range c2.Sources.Images { + c.AddImage(i) + } + for _, i := range c2.Sources.LocalImages { + c.AddLocalImage(i) + } + for _, l := range c2.Sources.Local { + c.AddLocal(l) + } + for _, g := range c2.Sources.Git { + c.AddGit(g) + } + for _, h := range c2.Sources.HTTP { + c.AddHTTP(h) + } + for _, s := range c2.Secrets { + c.AddSecret(s) + } + for _, s := range c2.SSH { + c.AddSSH(s) + } + if c2.NetworkAccess { + c.NetworkAccess = true + } + if c2.IncompleteMaterials { + c.IncompleteMaterials = true + } + return nil +} + +func (c *Capture) Sort() { + sort.Slice(c.Sources.Images, func(i, j int) bool { + return c.Sources.Images[i].Ref < c.Sources.Images[j].Ref + }) + sort.Slice(c.Sources.LocalImages, func(i, j int) bool { + return c.Sources.LocalImages[i].Ref < c.Sources.LocalImages[j].Ref + }) + sort.Slice(c.Sources.Local, func(i, j int) bool { + return c.Sources.Local[i].Name < c.Sources.Local[j].Name + }) + sort.Slice(c.Sources.Git, func(i, j int) bool { + return c.Sources.Git[i].URL < c.Sources.Git[j].URL + }) + sort.Slice(c.Sources.HTTP, func(i, j int) bool { + return c.Sources.HTTP[i].URL < c.Sources.HTTP[j].URL + }) + sort.Slice(c.Secrets, func(i, j int) bool { + return c.Secrets[i].ID < c.Secrets[j].ID + }) + sort.Slice(c.SSH, func(i, j int) bool { + return c.SSH[i].ID < c.SSH[j].ID + }) +} + +// OptimizeImageSources filters out image sources by digest reference if same digest +// is already present by a tag reference. +func (c *Capture) OptimizeImageSources() error { + m := map[string]struct{}{} + for _, i := range c.Sources.Images { + ref, nameTag, err := parseRefName(i.Ref) + if err != nil { + return err + } + if _, ok := ref.(distreference.Canonical); !ok { + m[nameTag] = struct{}{} + } + } + + images := make([]ImageSource, 0, len(c.Sources.Images)) + for _, i := range c.Sources.Images { + ref, nameTag, err := parseRefName(i.Ref) + if err != nil { + return err + } + if _, ok := ref.(distreference.Canonical); ok { + if _, ok := m[nameTag]; ok { + continue + } + } + images = append(images, i) + } + c.Sources.Images = images + return nil +} + +func (c *Capture) AddImage(i ImageSource) { + for _, v := range c.Sources.Images { + if v.Ref == i.Ref { + if v.Platform == i.Platform { + return + } + if v.Platform != nil && i.Platform != nil { + if v.Platform.Architecture == i.Platform.Architecture && v.Platform.OS == i.Platform.OS && v.Platform.Variant == i.Platform.Variant { + return + } + } + } + } + c.Sources.Images = append(c.Sources.Images, i) +} + +func (c *Capture) AddLocalImage(i ImageSource) { + for _, v := range c.Sources.LocalImages { + if v.Ref == i.Ref { + if v.Platform == i.Platform { + return + } + if v.Platform != nil && i.Platform != nil { + if v.Platform.Architecture == i.Platform.Architecture && v.Platform.OS == i.Platform.OS && v.Platform.Variant == i.Platform.Variant { + return + } + } + } + } + c.Sources.LocalImages = append(c.Sources.LocalImages, i) +} + +func (c *Capture) AddLocal(l LocalSource) { + for _, v := range c.Sources.Local { + if v.Name == l.Name { + return + } + } + c.Sources.Local = append(c.Sources.Local, l) +} + +func (c *Capture) AddGit(g GitSource) { + for _, v := range c.Sources.Git { + if v.URL == g.URL { + return + } + } + 
c.Sources.Git = append(c.Sources.Git, g) +} + +func (c *Capture) AddHTTP(h HTTPSource) { + for _, v := range c.Sources.HTTP { + if v.URL == h.URL { + return + } + } + c.Sources.HTTP = append(c.Sources.HTTP, h) +} + +func (c *Capture) AddSecret(s Secret) { + for i, v := range c.Secrets { + if v.ID == s.ID { + if !s.Optional { + c.Secrets[i].Optional = false + } + return + } + } + c.Secrets = append(c.Secrets, s) +} + +func (c *Capture) AddSSH(s SSH) { + if s.ID == "" { + s.ID = "default" + } + for i, v := range c.SSH { + if v.ID == s.ID { + if !s.Optional { + c.SSH[i].Optional = false + } + return + } + } + c.SSH = append(c.SSH, s) +} + +func parseRefName(s string) (distreference.Named, string, error) { + ref, err := distreference.ParseNormalizedNamed(s) + if err != nil { + return nil, "", err + } + name := ref.Name() + tag := "latest" + if r, ok := ref.(distreference.Tagged); ok { + tag = r.Tag() + } + return ref, name + ":" + tag, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go new file mode 100644 index 0000000000..7608f5cfae --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/predicate.go @@ -0,0 +1,245 @@ +package provenance + +import ( + "strings" + + "github.com/containerd/containerd/platforms" + slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + "github.com/moby/buildkit/util/purl" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/package-url/packageurl-go" +) + +const ( + BuildKitBuildType = "https://mobyproject.org/buildkit@v1" +) + +type ProvenancePredicate struct { + slsa02.ProvenancePredicate + Invocation ProvenanceInvocation `json:"invocation,omitempty"` + BuildConfig *BuildConfig `json:"buildConfig,omitempty"` + Metadata *ProvenanceMetadata `json:"metadata,omitempty"` +} + +type ProvenanceInvocation struct { + ConfigSource slsa02.ConfigSource `json:"configSource,omitempty"` + Parameters Parameters `json:"parameters,omitempty"` + Environment Environment `json:"environment,omitempty"` +} + +type Parameters struct { + Frontend string `json:"frontend,omitempty"` + Args map[string]string `json:"args,omitempty"` + Secrets []*Secret `json:"secrets,omitempty"` + SSH []*SSH `json:"ssh,omitempty"` + Locals []*LocalSource `json:"locals,omitempty"` + // TODO: select export attributes + // TODO: frontend inputs +} + +type Environment struct { + Platform string `json:"platform"` +} + +type ProvenanceMetadata struct { + slsa02.ProvenanceMetadata + BuildKitMetadata BuildKitMetadata `json:"https://mobyproject.org/buildkit@v1#metadata,omitempty"` + Hermetic bool `json:"https://mobyproject.org/buildkit@v1#hermetic,omitempty"` +} + +type BuildKitMetadata struct { + VCS map[string]string `json:"vcs,omitempty"` + Source *Source `json:"source,omitempty"` + Layers map[string][][]ocispecs.Descriptor `json:"layers,omitempty"` +} + +func slsaMaterials(srcs Sources) ([]slsa.ProvenanceMaterial, error) { + count := len(srcs.Images) + len(srcs.Git) + len(srcs.HTTP) + len(srcs.LocalImages) + out := make([]slsa.ProvenanceMaterial, 0, count) + + for _, s := range srcs.Images { + uri, err := purl.RefToPURL(s.Ref, s.Platform) + if err != nil { + return nil, err + } + out = append(out, slsa.ProvenanceMaterial{ + URI: uri, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }) + } + + for _, s := range srcs.Git { + 
out = append(out, slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + "sha1": s.Commit, + }, + }) + } + + for _, s := range srcs.HTTP { + out = append(out, slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }) + } + + for _, s := range srcs.LocalImages { + q := []packageurl.Qualifier{} + if s.Platform != nil { + q = append(q, packageurl.Qualifier{ + Key: "platform", + Value: platforms.Format(*s.Platform), + }) + } + packageurl.NewPackageURL(packageurl.TypeOCI, "", s.Ref, "", q, "") + out = append(out, slsa.ProvenanceMaterial{ + URI: s.Ref, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }) + } + return out, nil +} + +func findMaterial(srcs Sources, uri string) (*slsa.ProvenanceMaterial, bool) { + for _, s := range srcs.Git { + if s.URL == uri { + return &slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + "sha1": s.Commit, + }, + }, true + } + } + for _, s := range srcs.HTTP { + if s.URL == uri { + return &slsa.ProvenanceMaterial{ + URI: s.URL, + Digest: slsa.DigestSet{ + s.Digest.Algorithm().String(): s.Digest.Hex(), + }, + }, true + } + } + return nil, false +} + +func NewPredicate(c *Capture) (*ProvenancePredicate, error) { + materials, err := slsaMaterials(c.Sources) + if err != nil { + return nil, err + } + inv := ProvenanceInvocation{} + + contextKey := "context" + if v, ok := c.Args["contextkey"]; ok && v != "" { + contextKey = v + } + + if v, ok := c.Args[contextKey]; ok && v != "" { + if m, ok := findMaterial(c.Sources, v); ok { + inv.ConfigSource.URI = m.URI + inv.ConfigSource.Digest = m.Digest + } else { + inv.ConfigSource.URI = v + } + delete(c.Args, contextKey) + } + + if v, ok := c.Args["filename"]; ok && v != "" { + inv.ConfigSource.EntryPoint = v + delete(c.Args, "filename") + } + + vcs := make(map[string]string) + for k, v := range c.Args { + if strings.HasPrefix(k, "vcs:") { + delete(c.Args, k) + if v != "" { + vcs[strings.TrimPrefix(k, "vcs:")] = v + } + } + } + + inv.Environment.Platform = platforms.Format(platforms.Normalize(platforms.DefaultSpec())) + + inv.Parameters.Frontend = c.Frontend + inv.Parameters.Args = c.Args + + for _, s := range c.Secrets { + inv.Parameters.Secrets = append(inv.Parameters.Secrets, &Secret{ + ID: s.ID, + Optional: s.Optional, + }) + } + for _, s := range c.SSH { + inv.Parameters.SSH = append(inv.Parameters.SSH, &SSH{ + ID: s.ID, + Optional: s.Optional, + }) + } + for _, s := range c.Sources.Local { + inv.Parameters.Locals = append(inv.Parameters.Locals, &LocalSource{ + Name: s.Name, + }) + } + + incompleteMaterials := c.IncompleteMaterials + if !incompleteMaterials { + if len(c.Sources.Local) > 0 { + incompleteMaterials = true + } + } + + pr := &ProvenancePredicate{ + Invocation: inv, + ProvenancePredicate: slsa02.ProvenancePredicate{ + BuildType: BuildKitBuildType, + Materials: materials, + }, + Metadata: &ProvenanceMetadata{ + ProvenanceMetadata: slsa02.ProvenanceMetadata{ + Completeness: slsa02.ProvenanceComplete{ + Parameters: c.Frontend != "", + Environment: true, + Materials: !incompleteMaterials, + }, + }, + Hermetic: !incompleteMaterials && !c.NetworkAccess, + }, + } + + if len(vcs) > 0 { + pr.Metadata.BuildKitMetadata.VCS = vcs + } + + return pr, nil +} + +func FilterArgs(m map[string]string) map[string]string { + var hostSpecificArgs = map[string]struct{}{ + "cgroup-parent": {}, + "image-resolve-mode": {}, + "platform": {}, + "cache-imports": {}, + } + out := make(map[string]string) + for k, v := 
range m { + if _, ok := hostSpecificArgs[k]; ok { + continue + } + if strings.HasPrefix(k, "attest:") { + continue + } + out[k] = v + } + return out +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/result.go b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go index 0cadda547d..718b1b09d3 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/result.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go @@ -1,86 +1,23 @@ package llbsolver import ( - "bytes" "context" - "path" cacheconfig "github.com/moby/buildkit/cache/config" - "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/worker" - digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "golang.org/x/sync/errgroup" ) -type Selector struct { - Path string - Wildcard bool - FollowLinks bool - IncludePatterns []string - ExcludePatterns []string +type Result struct { + *frontend.Result + Provenance *provenance.Result } -func (sel Selector) HasWildcardOrFilters() bool { - return sel.Wildcard || len(sel.IncludePatterns) != 0 || len(sel.ExcludePatterns) != 0 -} - -func UnlazyResultFunc(ctx context.Context, res solver.Result, g session.Group) error { - ref, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return errors.Errorf("invalid reference: %T", res) - } - if ref.ImmutableRef == nil { - return nil - } - return ref.ImmutableRef.Extract(ctx, g) -} - -func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { - return func(ctx context.Context, res solver.Result, s session.Group) (digest.Digest, error) { - ref, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return "", errors.Errorf("invalid reference: %T", res) - } - - if len(selectors) == 0 { - selectors = []Selector{{}} - } - - dgsts := make([][]byte, len(selectors)) - - eg, ctx := errgroup.WithContext(ctx) - - for i, sel := range selectors { - i, sel := i, sel - eg.Go(func() error { - dgst, err := contenthash.Checksum( - ctx, ref.ImmutableRef, path.Join("/", sel.Path), - contenthash.ChecksumOpts{ - Wildcard: sel.Wildcard, - FollowLinks: sel.FollowLinks, - IncludePatterns: sel.IncludePatterns, - ExcludePatterns: sel.ExcludePatterns, - }, - s, - ) - if err != nil { - return errors.Wrapf(err, "failed to calculate checksum of ref %s", ref.ID()) - } - dgsts[i] = []byte(dgst) - return nil - }) - } - - if err := eg.Wait(); err != nil { - return "", err - } - - return digest.FromBytes(bytes.Join(dgsts, []byte{0})), nil - } -} +type Attestation = frontend.Attestation func workerRefResolver(refCfg cacheconfig.RefConfig, all bool, g session.Group) func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) { return func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go index ee06233da5..2f7ba61e5f 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go @@ -3,10 +3,15 @@ package llbsolver import ( "context" "encoding/base64" + "encoding/json" "fmt" + "os" "strings" + "sync" "time" + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/cache" cacheconfig "github.com/moby/buildkit/cache/config" 
"github.com/moby/buildkit/cache/remotecache" @@ -19,27 +24,60 @@ import ( "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/provenance" + "github.com/moby/buildkit/solver/result" + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/entitlements" + "github.com/moby/buildkit/util/grpcerrors" "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/tracing/detect" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) -const keyEntitlements = "llb.entitlements" +const ( + keyEntitlements = "llb.entitlements" + keySourcePolicy = "llb.sourcepolicy" +) type ExporterRequest struct { - Exporter exporter.ExporterInstance - CacheExporter remotecache.Exporter - CacheExportMode solver.CacheExportMode + Type string + Attrs map[string]string + Exporter exporter.ExporterInstance + CacheExporters []RemoteCacheExporter +} + +type RemoteCacheExporter struct { + remotecache.Exporter + solver.CacheExportMode + IgnoreError bool } // ResolveWorkerFunc returns default worker for the temporary default non-distributed use cases type ResolveWorkerFunc func() (worker.Worker, error) +// Opt defines options for new Solver. +type Opt struct { + CacheManager solver.CacheManager + CacheResolvers map[string]remotecache.ResolveCacheImporterFunc + Entitlements []string + Frontends map[string]frontend.Frontend + GatewayForwarder *controlgateway.GatewayForwarder + SessionManager *session.Manager + WorkerController *worker.Controller + HistoryQueue *HistoryQueue +} + type Solver struct { workerController *worker.Controller solver *solver.Solver @@ -50,23 +88,29 @@ type Solver struct { gatewayForwarder *controlgateway.GatewayForwarder sm *session.Manager entitlements []string + history *HistoryQueue } -func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.CacheManager, resolveCI map[string]remotecache.ResolveCacheImporterFunc, gatewayForwarder *controlgateway.GatewayForwarder, sm *session.Manager, ents []string) (*Solver, error) { +// Processor defines a processing function to be applied after solving, but +// before exporting +type Processor func(ctx context.Context, result *Result, s *Solver, j *solver.Job) (*Result, error) + +func New(opt Opt) (*Solver, error) { s := &Solver{ - workerController: wc, - resolveWorker: defaultResolver(wc), - eachWorker: allWorkers(wc), - frontends: f, - resolveCacheImporterFuncs: resolveCI, - gatewayForwarder: gatewayForwarder, - sm: sm, - entitlements: ents, + workerController: opt.WorkerController, + resolveWorker: defaultResolver(opt.WorkerController), + eachWorker: allWorkers(opt.WorkerController), + frontends: opt.Frontends, + resolveCacheImporterFuncs: opt.CacheResolvers, + gatewayForwarder: opt.GatewayForwarder, + sm: opt.SessionManager, + entitlements: opt.Entitlements, + history: opt.HistoryQueue, } s.solver = solver.NewSolver(solver.SolverOpt{ ResolveOpFunc: s.resolver(), - DefaultCache: cache, + DefaultCache: opt.CacheManager, }) return s, nil } @@ -81,8 +125,8 @@ func (s *Solver) resolver() solver.ResolveOpFunc { } } -func (s 
*Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { - return &llbBridge{ +func (s *Solver) bridge(b solver.Builder) *provenanceBridge { + return &provenanceBridge{llbBridge: &llbBridge{ builder: b, frontends: s.frontends, resolveWorker: s.resolveWorker, @@ -90,10 +134,272 @@ func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { resolveCacheImporterFuncs: s.resolveCacheImporterFuncs, cms: map[string]solver.CacheManager{}, sm: s.sm, - } + }} } -func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req frontend.SolveRequest, exp ExporterRequest, ent []entitlements.Entitlement) (*client.SolveResponse, error) { +func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge { + return s.bridge(b) +} + +func (s *Solver) recordBuildHistory(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, j *solver.Job) (func(*Result, exporter.DescriptorReference, error) error, error) { + var stopTrace func() []tracetest.SpanStub + + if s := trace.SpanFromContext(ctx); s.SpanContext().IsValid() { + if exp, err := detect.Exporter(); err == nil { + if rec, ok := exp.(*detect.TraceRecorder); ok { + stopTrace = rec.Record(s.SpanContext().TraceID()) + } + } + } + + st := time.Now() + rec := &controlapi.BuildHistoryRecord{ + Ref: id, + Frontend: req.Frontend, + FrontendAttrs: req.FrontendOpt, + CreatedAt: &st, + } + + if exp.Type != "" { + rec.Exporters = []*controlapi.Exporter{{ + Type: exp.Type, + Attrs: exp.Attrs, + }} + } + + if err := s.history.Update(ctx, &controlapi.BuildHistoryEvent{ + Type: controlapi.BuildHistoryEventType_STARTED, + Record: rec, + }); err != nil { + return nil, err + } + + return func(res *Result, descref exporter.DescriptorReference, err error) error { + en := time.Now() + rec.CompletedAt = &en + + j.CloseProgress() + + if res != nil && len(res.Metadata) > 0 { + rec.ExporterResponse = map[string]string{} + for k, v := range res.Metadata { + rec.ExporterResponse[k] = string(v) + } + } + + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + var mu sync.Mutex + ch := make(chan *client.SolveStatus) + eg, ctx2 := errgroup.WithContext(ctx) + var releasers []func() + + attrs := map[string]string{ + "mode": "max", + } + + makeProvenance := func(res solver.ResultProxy, cap *provenance.Capture) (*controlapi.Descriptor, func(), error) { + prc, err := NewProvenanceCreator(ctx2, cap, res, attrs, j) + if err != nil { + return nil, nil, err + } + pr, err := prc.Predicate() + if err != nil { + return nil, nil, err + } + dt, err := json.MarshalIndent(pr, "", " ") + if err != nil { + return nil, nil, err + } + w, err := s.history.OpenBlobWriter(ctx, attestation.MediaTypeDockerSchema2AttestationType) + if err != nil { + return nil, nil, err + } + defer func() { + if w != nil { + w.Discard() + } + }() + if _, err := w.Write(dt); err != nil { + return nil, nil, err + } + desc, release, err := w.Commit(ctx2) + if err != nil { + return nil, nil, err + } + w = nil + return &controlapi.Descriptor{ + Digest: desc.Digest, + Size_: desc.Size, + MediaType: desc.MediaType, + Annotations: map[string]string{ + "in-toto.io/predicate-type": slsa02.PredicateSLSAProvenance, + }, + }, release, nil + } + + if res != nil { + if res.Ref != nil { + eg.Go(func() error { + desc, release, err := makeProvenance(res.Ref, res.Provenance.Ref) + if err != nil { + return err + } + + mu.Lock() + releasers = append(releasers, release) + if rec.Result == nil { + rec.Result = &controlapi.BuildResultInfo{} + } + 
rec.Result.Attestations = append(rec.Result.Attestations, desc) + mu.Unlock() + return nil + }) + } + + for k, r := range res.Refs { + k, r := k, r + cp := res.Provenance.Refs[k] + eg.Go(func() error { + desc, release, err := makeProvenance(r, cp) + if err != nil { + return err + } + + mu.Lock() + releasers = append(releasers, release) + if rec.Results == nil { + rec.Results = make(map[string]*controlapi.BuildResultInfo) + } + if rec.Results[k] == nil { + rec.Results[k] = &controlapi.BuildResultInfo{} + } + rec.Results[k].Attestations = append(rec.Results[k].Attestations, desc) + mu.Unlock() + return nil + }) + } + } + + eg.Go(func() error { + st, releaseStatus, err := s.history.ImportStatus(ctx2, ch) + if err != nil { + return err + } + mu.Lock() + releasers = append(releasers, releaseStatus) + rec.Logs = &controlapi.Descriptor{ + Digest: st.Descriptor.Digest, + Size_: st.Descriptor.Size, + MediaType: st.Descriptor.MediaType, + } + rec.NumCachedSteps = int32(st.NumCachedSteps) + rec.NumCompletedSteps = int32(st.NumCompletedSteps) + rec.NumTotalSteps = int32(st.NumTotalSteps) + mu.Unlock() + return nil + }) + eg.Go(func() error { + return j.Status(ctx2, ch) + }) + + if descref != nil { + eg.Go(func() error { + mu.Lock() + if rec.Result == nil { + rec.Result = &controlapi.BuildResultInfo{} + } + desc := descref.Descriptor() + rec.Result.Result = &controlapi.Descriptor{ + Digest: desc.Digest, + Size_: desc.Size, + MediaType: desc.MediaType, + Annotations: desc.Annotations, + } + mu.Unlock() + return nil + }) + } + + if err1 := eg.Wait(); err == nil { + err = err1 + } + + defer func() { + for _, f := range releasers { + f() + } + }() + + if err != nil { + st, ok := grpcerrors.AsGRPCStatus(grpcerrors.ToGRPC(err)) + if !ok { + st = status.New(codes.Unknown, err.Error()) + } + rec.Error = grpcerrors.ToRPCStatus(st.Proto()) + } + if err1 := s.history.Update(ctx, &controlapi.BuildHistoryEvent{ + Type: controlapi.BuildHistoryEventType_COMPLETE, + Record: rec, + }); err1 != nil { + if err == nil { + err = err1 + } + } + + if stopTrace == nil { + logrus.Warn("no trace recorder found, skipping") + return err + } + go func() { + time.Sleep(3 * time.Second) + spans := stopTrace() + + if len(spans) == 0 { + return + } + + if err := func() error { + w, err := s.history.OpenBlobWriter(context.TODO(), "application/vnd.buildkit.otlp.json.v0") + if err != nil { + return err + } + enc := json.NewEncoder(w) + enc.SetIndent("", " ") + for _, sp := range spans { + if err := enc.Encode(sp); err != nil { + return err + } + } + + desc, release, err := w.Commit(context.TODO()) + if err != nil { + return err + } + defer release() + + if err := s.history.UpdateRef(context.TODO(), id, func(rec *controlapi.BuildHistoryRecord) error { + rec.Trace = &controlapi.Descriptor{ + Digest: desc.Digest, + MediaType: desc.MediaType, + Size_: desc.Size, + } + return nil + }); err != nil { + return err + } + return nil + }(); err != nil { + logrus.Errorf("failed to save trace for %s: %+v", id, err) + } + }() + + return err + }, nil +} + +func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req frontend.SolveRequest, exp ExporterRequest, ent []entitlements.Entitlement, post []Processor, internal bool, srcPol *spb.Policy) (_ *client.SolveResponse, err error) { j, err := s.solver.NewJob(id) if err != nil { return nil, err @@ -101,17 +407,48 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro defer j.Discard() + var res *frontend.Result + var resProv *Result + var descref 
exporter.DescriptorReference + + var releasers []func() + defer func() { + for _, f := range releasers { + f() + } + if descref != nil { + descref.Release() + } + }() + + if internal { + defer j.CloseProgress() + } else { + rec, err1 := s.recordBuildHistory(ctx, id, req, exp, j) + if err != nil { + defer j.CloseProgress() + return nil, err1 + } + defer func() { + err = rec(resProv, descref, err) + }() + } + set, err := entitlements.WhiteList(ent, supportedEntitlements(s.entitlements)) if err != nil { return nil, err } j.SetValue(keyEntitlements, set) + if srcPol != nil { + j.SetValue(keySourcePolicy, *srcPol) + } + j.SessionID = sessionID - var res *frontend.Result + br := s.bridge(j) if s.gatewayForwarder != nil && req.Definition == nil && req.Frontend == "" { - fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController, req.FrontendInputs, sessionID, s.sm) + fwd := gateway.NewBridgeForwarder(ctx, br, s.workerController, req.FrontendInputs, sessionID, s.sm) defer fwd.Discard() if err := s.gatewayForwarder.RegisterBuild(ctx, id, fwd); err != nil { return nil, err @@ -129,7 +466,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro return nil, err } } else { - res, err = s.Bridge(j).Solve(ctx, req, sessionID) + res, err = br.Solve(ctx, req, sessionID) if err != nil { return nil, err } @@ -139,12 +476,12 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro res = &frontend.Result{} } - defer func() { + releasers = append(releasers, func() { res.EachRef(func(ref solver.ResultProxy) error { go ref.Release(context.TODO()) return nil }) - }() + }) eg, ctx2 := errgroup.WithContext(ctx) res.EachRef(func(ref solver.ResultProxy) error { @@ -158,149 +495,60 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro return nil, err } - if r := res.Ref; r != nil { - dtbi, err := buildinfo.Encode(ctx, res.Metadata, exptypes.ExporterBuildInfo, r.BuildSources()) + resProv, err = addProvenanceToResult(res, br) + if err != nil { + return nil, err + } + + for _, post := range post { + res2, err := post(ctx, resProv, s, j) if err != nil { return nil, err } - if dtbi != nil && len(dtbi) > 0 { - if res.Metadata == nil { - res.Metadata = make(map[string][]byte) - } - res.Metadata[exptypes.ExporterBuildInfo] = dtbi - } + resProv = res2 } - if res.Refs != nil { - for k, r := range res.Refs { - if r == nil { - continue - } - dtbi, err := buildinfo.Encode(ctx, res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), r.BuildSources()) - if err != nil { - return nil, err - } - if dtbi != nil && len(dtbi) > 0 { - if res.Metadata == nil { - res.Metadata = make(map[string][]byte) - } - res.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k)] = dtbi - } - } + res = resProv.Result + + cached, err := result.ConvertResult(res, func(res solver.ResultProxy) (solver.CachedResult, error) { + return res.Result(ctx) + }) + if err != nil { + return nil, err } + inp, err := result.ConvertResult(cached, func(res solver.CachedResult) (cache.ImmutableRef, error) { + workerRef, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference: %T", res.Sys()) + } + return workerRef.ImmutableRef, nil + }) + if err != nil { + return nil, err + } + + cacheExporters, inlineCacheExporter := splitCacheExporters(exp.CacheExporters) var exporterResponse map[string]string if e := exp.Exporter; e != nil { - inp := exporter.Source{ - Metadata: res.Metadata, + meta, err := runInlineCacheExporter(ctx, e, 
inlineCacheExporter, j, cached) + if err != nil { + return nil, err } - if inp.Metadata == nil { - inp.Metadata = make(map[string][]byte) + for k, v := range meta { + inp.AddMeta(k, v) } - var cr solver.CachedResult - var crMap = map[string]solver.CachedResult{} - if res := res.Ref; res != nil { - r, err := res.Result(ctx) - if err != nil { - return nil, err - } - workerRef, ok := r.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid reference: %T", r.Sys()) - } - inp.Ref = workerRef.ImmutableRef - cr = r - } - if res.Refs != nil { - m := make(map[string]cache.ImmutableRef, len(res.Refs)) - for k, res := range res.Refs { - if res == nil { - m[k] = nil - } else { - r, err := res.Result(ctx) - if err != nil { - return nil, err - } - workerRef, ok := r.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid reference: %T", r.Sys()) - } - m[k] = workerRef.ImmutableRef - crMap[k] = r - } - } - inp.Refs = m - } - if _, ok := asInlineCache(exp.CacheExporter); ok { - if err := inBuilderContext(ctx, j, "preparing layers for inline cache", "", func(ctx context.Context, _ session.Group) error { - if cr != nil { - dtic, err := inlineCache(ctx, exp.CacheExporter, cr, e.Config().Compression, session.NewGroup(sessionID)) - if err != nil { - return err - } - if dtic != nil { - inp.Metadata[exptypes.ExporterInlineCache] = dtic - } - } - for k, res := range crMap { - dtic, err := inlineCache(ctx, exp.CacheExporter, res, e.Config().Compression, session.NewGroup(sessionID)) - if err != nil { - return err - } - if dtic != nil { - inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, k)] = dtic - } - } - exp.CacheExporter = nil - return nil - }); err != nil { - return nil, err - } - } - if err := inBuilderContext(ctx, j, e.Name(), "", func(ctx context.Context, _ session.Group) error { - exporterResponse, err = e.Export(ctx, inp, j.SessionID) + + if err := inBuilderContext(ctx, j, e.Name(), j.SessionID+"-export", func(ctx context.Context, _ session.Group) error { + exporterResponse, descref, err = e.Export(ctx, inp, j.SessionID) return err }); err != nil { return nil, err } } - g := session.NewGroup(j.SessionID) - var cacheExporterResponse map[string]string - if e := exp.CacheExporter; e != nil { - if err := inBuilderContext(ctx, j, "exporting cache", "", func(ctx context.Context, _ session.Group) error { - prepareDone := oneOffProgress(ctx, "preparing build cache for export") - if err := res.EachRef(func(res solver.ResultProxy) error { - r, err := res.Result(ctx) - if err != nil { - return err - } - - workerRef, ok := r.Sys().(*worker.WorkerRef) - if !ok { - return errors.Errorf("invalid reference: %T", r.Sys()) - } - ctx = withDescHandlerCacheOpts(ctx, workerRef.ImmutableRef) - - // Configure compression - compressionConfig := e.Config().Compression - - // all keys have same export chain so exporting others is not needed - _, err = r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ - ResolveRemotes: workerRefResolver(cacheconfig.RefConfig{Compression: compressionConfig}, false, g), - Mode: exp.CacheExportMode, - Session: g, - CompressionOpt: &compressionConfig, - }) - return err - }); err != nil { - return prepareDone(err) - } - prepareDone(nil) - cacheExporterResponse, err = e.Finalize(ctx) - return err - }); err != nil { - return nil, err - } + cacheExporterResponse, err := runCacheExporters(ctx, cacheExporters, j, cached, inp) + if err != nil { + return nil, err } if exporterResponse == nil { @@ -326,6 +574,235 @@ func (s *Solver) Solve(ctx 
context.Context, id string, sessionID string, req fro }, nil } +func runCacheExporters(ctx context.Context, exporters []RemoteCacheExporter, j *solver.Job, cached *result.Result[solver.CachedResult], inp *result.Result[cache.ImmutableRef]) (map[string]string, error) { + eg, ctx := errgroup.WithContext(ctx) + g := session.NewGroup(j.SessionID) + var cacheExporterResponse map[string]string + resps := make([]map[string]string, len(exporters)) + for i, exp := range exporters { + func(exp RemoteCacheExporter, i int) { + eg.Go(func() (err error) { + id := fmt.Sprint(j.SessionID, "-cache-", i) + err = inBuilderContext(ctx, j, exp.Exporter.Name(), id, func(ctx context.Context, _ session.Group) error { + prepareDone := progress.OneOff(ctx, "preparing build cache for export") + if err := result.EachRef(cached, inp, func(res solver.CachedResult, ref cache.ImmutableRef) error { + ctx = withDescHandlerCacheOpts(ctx, ref) + + // Configure compression + compressionConfig := exp.Config().Compression + + // all keys have same export chain so exporting others is not needed + _, err = res.CacheKeys()[0].Exporter.ExportTo(ctx, exp, solver.CacheExportOpt{ + ResolveRemotes: workerRefResolver(cacheconfig.RefConfig{Compression: compressionConfig}, false, g), + Mode: exp.CacheExportMode, + Session: g, + CompressionOpt: &compressionConfig, + }) + return err + }); err != nil { + return prepareDone(err) + } + resps[i], err = exp.Finalize(ctx) + return prepareDone(err) + }) + if exp.IgnoreError { + err = nil + } + return err + }) + }(exp, i) + } + if err := eg.Wait(); err != nil { + return nil, err + } + for _, resp := range resps { + for k, v := range resp { + if cacheExporterResponse == nil { + cacheExporterResponse = make(map[string]string) + } + cacheExporterResponse[k] = v + } + } + return cacheExporterResponse, nil +} + +func runInlineCacheExporter(ctx context.Context, e exporter.ExporterInstance, inlineExporter *RemoteCacheExporter, j *solver.Job, cached *result.Result[solver.CachedResult]) (map[string][]byte, error) { + meta := map[string][]byte{} + if inlineExporter == nil { + return nil, nil + } + if err := inBuilderContext(ctx, j, "preparing layers for inline cache", j.SessionID+"-cache-inline", func(ctx context.Context, _ session.Group) error { + if res := cached.Ref; res != nil { + dtic, err := inlineCache(ctx, inlineExporter.Exporter, res, e.Config().Compression(), session.NewGroup(j.SessionID)) + if err != nil { + return err + } + if dtic != nil { + meta[exptypes.ExporterInlineCache] = dtic + } + } + for k, res := range cached.Refs { + dtic, err := inlineCache(ctx, inlineExporter.Exporter, res, e.Config().Compression(), session.NewGroup(j.SessionID)) + if err != nil { + return err + } + if dtic != nil { + meta[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, k)] = dtic + } + } + return nil + }); err != nil { + return nil, err + } + return meta, nil +} + +func splitCacheExporters(exporters []RemoteCacheExporter) (rest []RemoteCacheExporter, inline *RemoteCacheExporter) { + rest = make([]RemoteCacheExporter, 0, len(exporters)) + for i, exp := range exporters { + if _, ok := asInlineCache(exp.Exporter); ok { + inline = &exporters[i] + continue + } + rest = append(rest, exp) + } + return rest, inline +} + +func addProvenanceToResult(res *frontend.Result, br *provenanceBridge) (*Result, error) { + if res == nil { + return nil, nil + } + reqs, err := br.requests(res) + if err != nil { + return nil, err + } + out := &Result{ + Result: res, + Provenance: &provenance.Result{}, + } + + if res.Ref != nil { + cp, 
err := getProvenance(res.Ref, reqs.ref.bridge, "", reqs) + if err != nil { + return nil, err + } + out.Provenance.Ref = cp + if res.Metadata == nil { + res.Metadata = map[string][]byte{} + } + if err := buildinfo.AddMetadata(res.Metadata, exptypes.ExporterBuildInfo, cp); err != nil { + return nil, err + } + } + + if len(res.Refs) != 0 { + out.Provenance.Refs = make(map[string]*provenance.Capture, len(res.Refs)) + } + for k, ref := range res.Refs { + cp, err := getProvenance(ref, reqs.refs[k].bridge, k, reqs) + if err != nil { + return nil, err + } + out.Provenance.Refs[k] = cp + if res.Metadata == nil { + res.Metadata = map[string][]byte{} + } + if err := buildinfo.AddMetadata(res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), cp); err != nil { + return nil, err + } + } + + if len(res.Attestations) != 0 { + out.Provenance.Attestations = make(map[string][]result.Attestation[*provenance.Capture], len(res.Attestations)) + } + for k, as := range res.Attestations { + for i, a := range as { + a2, err := result.ConvertAttestation(&a, func(r solver.ResultProxy) (*provenance.Capture, error) { + return getProvenance(r, reqs.atts[k][i].bridge, k, reqs) + }) + if err != nil { + return nil, err + } + out.Provenance.Attestations[k] = append(out.Provenance.Attestations[k], *a2) + } + } + + return out, nil +} + +func getRefProvenance(ref solver.ResultProxy, br *provenanceBridge) (*provenance.Capture, error) { + if ref == nil { + return nil, nil + } + p := ref.Provenance() + if p == nil { + return nil, errors.Errorf("missing provenance for %s", ref.ID()) + } + pr, ok := p.(*provenance.Capture) + if !ok { + return nil, errors.Errorf("invalid provenance type %T", p) + } + + if br.req != nil { + if pr == nil { + return nil, errors.Errorf("missing provenance for %s", ref.ID()) + } + + pr.Frontend = br.req.Frontend + pr.Args = provenance.FilterArgs(br.req.FrontendOpt) + // TODO: should also save some output options like compression + + if len(br.req.FrontendInputs) > 0 { + pr.IncompleteMaterials = true // not implemented + } + } + + return pr, nil +} + +func getProvenance(ref solver.ResultProxy, br *provenanceBridge, id string, reqs *resultRequests) (*provenance.Capture, error) { + pr, err := getRefProvenance(ref, br) + if err != nil { + return nil, err + } + if pr == nil { + return nil, nil + } + + visited := reqs.allRes() + visited[ref.ID()] = struct{}{} + // provenance for all the refs not directly in the result needs to be captured as well + if err := br.eachRef(func(r solver.ResultProxy) error { + if _, ok := visited[r.ID()]; ok { + return nil + } + visited[r.ID()] = struct{}{} + pr2, err := getRefProvenance(r, br) + if err != nil { + return err + } + return pr.Merge(pr2) + }); err != nil { + return nil, err + } + + imgs := br.allImages() + if id != "" { + imgs = reqs.filterImagePlatforms(id, imgs) + } + for _, img := range imgs { + pr.AddImage(img) + } + + if err := pr.OptimizeImageSources(); err != nil { + return nil, err + } + pr.Sort() + + return pr, nil +} + type inlineCacheExporter interface { ExportForLayers(context.Context, []digest.Digest) ([]byte, error) } @@ -384,6 +861,15 @@ func withDescHandlerCacheOpts(ctx context.Context, ref cache.ImmutableRef) conte } func (s *Solver) Status(ctx context.Context, id string, statusChan chan *client.SolveStatus) error { + if err := s.history.Status(ctx, id, statusChan); err != nil { + if !errors.Is(err, os.ErrNotExist) { + close(statusChan) + return err + } + } else { + close(statusChan) + return nil + } j, err := s.solver.Get(id) if err != 
nil { close(statusChan) @@ -412,23 +898,6 @@ func allWorkers(wc *worker.Controller) func(func(w worker.Worker) error) error { } } -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} - func inBuilderContext(ctx context.Context, b solver.Builder, name, id string, f func(ctx context.Context, g session.Group) error) error { if id == "" { id = name @@ -497,3 +966,29 @@ func loadEntitlements(b solver.Builder) (entitlements.Set, error) { } return ent, nil } + +func loadSourcePolicy(b solver.Builder) (*spb.Policy, error) { + set := make(map[spb.Rule]struct{}, 0) + err := b.EachValue(context.TODO(), keySourcePolicy, func(v interface{}) error { + x, ok := v.(spb.Policy) + if !ok { + return errors.Errorf("invalid source policy %T", v) + } + for _, f := range x.Rules { + set[*f] = struct{}{} + } + return nil + }) + if err != nil { + return nil, err + } + var srcPol *spb.Policy + if len(set) > 0 { + srcPol = &spb.Policy{} + for k := range set { + k := k + srcPol.Rules = append(srcPol.Rules, &k) + } + } + return srcPol, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/sourcepolicy.go b/vendor/github.com/moby/buildkit/solver/llbsolver/sourcepolicy.go new file mode 100644 index 0000000000..11a49616b3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/sourcepolicy.go @@ -0,0 +1,11 @@ +package llbsolver + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" +) + +type SourcePolicyEvaluator interface { + Evaluate(ctx context.Context, op *pb.Op) (bool, error) +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go index 4f36c2eddb..6901332d2b 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go @@ -1,11 +1,13 @@ package llbsolver import ( + "context" "fmt" "strings" "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/ops/opsutils" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/entitlements" @@ -143,8 +145,8 @@ func (dpc *detectPrunedCacheID) Load(op *pb.Op, md *pb.OpMetadata, opt *solver.V return nil } -func Load(def *pb.Definition, opts ...LoadOpt) (solver.Edge, error) { - return loadLLB(def, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) { +func Load(ctx context.Context, def *pb.Definition, polEngine SourcePolicyEvaluator, opts ...LoadOpt) (solver.Edge, error) { + return loadLLB(ctx, def, polEngine, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) { opMetadata := def.Metadata[dgst] vtx, err := newVertex(dgst, pbOp, &opMetadata, load, opts...) 
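// The polEngine handed to loadLLB above is consulted once per LLB op. As a
// minimal sketch (hypothetical, not part of this patch), an evaluator that
// pins one image source to another would mutate the op in place and report
// the mutation so that loadLLB can recompute the dependent digests:
//
//	type pinEvaluator struct{ from, to string }
//
//	func (p *pinEvaluator) Evaluate(ctx context.Context, op *pb.Op) (bool, error) {
//		src, ok := op.Op.(*pb.Op_Source)
//		if !ok || src.Source.Identifier != p.from {
//			return false, nil
//		}
//		src.Source.Identifier = p.to
//		return true, nil
//	}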
if err != nil { @@ -185,36 +187,105 @@ func newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(d return vtx, nil } +func recomputeDigests(ctx context.Context, all map[digest.Digest]*pb.Op, visited map[digest.Digest]digest.Digest, dgst digest.Digest) (digest.Digest, error) { + if dgst, ok := visited[dgst]; ok { + return dgst, nil + } + op := all[dgst] + + var mutated bool + for _, input := range op.Inputs { + if ctx.Err() != nil { + return "", ctx.Err() + } + + iDgst, err := recomputeDigests(ctx, all, visited, input.Digest) + if err != nil { + return "", err + } + if input.Digest != iDgst { + mutated = true + input.Digest = iDgst + } + } + + if !mutated { + return dgst, nil + } + + dt, err := op.Marshal() + if err != nil { + return "", err + } + newDgst := digest.FromBytes(dt) + visited[dgst] = newDgst + all[newDgst] = op + delete(all, dgst) + return newDgst, nil +} + // loadLLB loads LLB. // fn is executed sequentially. -func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error)) (solver.Edge, error) { +func loadLLB(ctx context.Context, def *pb.Definition, polEngine SourcePolicyEvaluator, fn func(digest.Digest, *pb.Op, func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error)) (solver.Edge, error) { if len(def.Def) == 0 { return solver.Edge{}, errors.New("invalid empty definition") } allOps := make(map[digest.Digest]*pb.Op) + mutatedDigests := make(map[digest.Digest]digest.Digest) // key: old, val: new - var dgst digest.Digest + var lastDgst digest.Digest for _, dt := range def.Def { var op pb.Op if err := (&op).Unmarshal(dt); err != nil { return solver.Edge{}, errors.Wrap(err, "failed to parse llb proto op") } - dgst = digest.FromBytes(dt) + dgst := digest.FromBytes(dt) + if polEngine != nil { + mutated, err := polEngine.Evaluate(ctx, &op) + if err != nil { + return solver.Edge{}, errors.Wrap(err, "error evaluating the source policy") + } + if mutated { + dtMutated, err := op.Marshal() + if err != nil { + return solver.Edge{}, err + } + dgstMutated := digest.FromBytes(dtMutated) + mutatedDigests[dgst] = dgstMutated + dgst = dgstMutated + } + } allOps[dgst] = &op + lastDgst = dgst + } + + for dgst := range allOps { + _, err := recomputeDigests(ctx, allOps, mutatedDigests, dgst) + if err != nil { + return solver.Edge{}, err + } } if len(allOps) < 2 { return solver.Edge{}, errors.Errorf("invalid LLB with %d vertexes", len(allOps)) } - lastOp := allOps[dgst] - delete(allOps, dgst) + for { + newDgst, ok := mutatedDigests[lastDgst] + if !ok { + break + } + lastDgst = newDgst + } + + lastOp := allOps[lastDgst] + delete(allOps, lastDgst) if len(lastOp.Inputs) == 0 { return solver.Edge{}, errors.Errorf("invalid LLB with no inputs on last vertex") } - dgst = lastOp.Inputs[0].Digest + dgst := lastOp.Inputs[0].Digest cache := make(map[digest.Digest]solver.Vertex) @@ -228,7 +299,7 @@ func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Dige return nil, errors.Errorf("invalid missing input digest %s", dgst) } - if err := ValidateOp(op); err != nil { + if err := opsutils.Validate(op); err != nil { return nil, err } @@ -301,63 +372,6 @@ func llbOpName(pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (st } } -func ValidateOp(op *pb.Op) error { - if op == nil { - return errors.Errorf("invalid nil op") - } - - switch op := op.Op.(type) { - case *pb.Op_Source: - if op.Source == nil { - return errors.Errorf("invalid nil source op") - } - case *pb.Op_Exec: - if op.Exec == 
nil { - return errors.Errorf("invalid nil exec op") - } - if op.Exec.Meta == nil { - return errors.Errorf("invalid exec op with no meta") - } - if len(op.Exec.Meta.Args) == 0 { - return errors.Errorf("invalid exec op with no args") - } - if len(op.Exec.Mounts) == 0 { - return errors.Errorf("invalid exec op with no mounts") - } - - isRoot := false - for _, m := range op.Exec.Mounts { - if m.Dest == pb.RootMount { - isRoot = true - break - } - } - if !isRoot { - return errors.Errorf("invalid exec op with no rootfs") - } - case *pb.Op_File: - if op.File == nil { - return errors.Errorf("invalid nil file op") - } - if len(op.File.Actions) == 0 { - return errors.Errorf("invalid file op with no actions") - } - case *pb.Op_Build: - if op.Build == nil { - return errors.Errorf("invalid nil build op") - } - case *pb.Op_Merge: - if op.Merge == nil { - return errors.Errorf("invalid nil merge op") - } - case *pb.Op_Diff: - if op.Diff == nil { - return errors.Errorf("invalid nil diff op") - } - } - return nil -} - func fileOpName(actions []*pb.FileAction) string { names := make([]string, 0, len(actions)) for _, action := range actions { diff --git a/vendor/github.com/moby/buildkit/solver/pb/attr.go b/vendor/github.com/moby/buildkit/solver/pb/attr.go index aa08a0e828..85e7cce60e 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/attr.go +++ b/vendor/github.com/moby/buildkit/solver/pb/attr.go @@ -26,6 +26,11 @@ const AttrImageResolveModeDefault = "default" const AttrImageResolveModeForcePull = "pull" const AttrImageResolveModePreferLocal = "local" const AttrImageRecordType = "image.recordtype" +const AttrImageLayerLimit = "image.layerlimit" + +const AttrOCILayoutSessionID = "oci.session" +const AttrOCILayoutStoreID = "oci.store" +const AttrOCILayoutLayerLimit = "oci.layerlimit" const AttrLocalDiffer = "local.differ" const AttrLocalDifferNone = "none" diff --git a/vendor/github.com/moby/buildkit/solver/pb/caps.go b/vendor/github.com/moby/buildkit/solver/pb/caps.go index 24b2789348..02380a4bab 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/caps.go +++ b/vendor/github.com/moby/buildkit/solver/pb/caps.go @@ -9,8 +9,10 @@ var Caps apicaps.CapList // considered immutable. After a capability is marked stable it should not be disabled. 
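// For context, a frontend typically gates new features on these IDs against
// the capability set its worker advertises. A minimal sketch (assuming caps
// is an apicaps.CapSet obtained from the daemon, not shown in this patch):
//
//	if err := caps.Supports(pb.CapSourcePolicy); err != nil {
//		return err // daemon too old to enforce source policies
//	}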
const ( - CapSourceImage apicaps.CapID = "source.image" - CapSourceImageResolveMode apicaps.CapID = "source.image.resolvemode" + CapSourceImage apicaps.CapID = "source.image" + CapSourceImageResolveMode apicaps.CapID = "source.image.resolvemode" + CapSourceImageLayerLimit apicaps.CapID = "source.image.layerlimit" + CapSourceLocal apicaps.CapID = "source.local" CapSourceLocalUnique apicaps.CapID = "source.local.unique" CapSourceLocalSessionID apicaps.CapID = "source.local.sessionid" @@ -33,6 +35,8 @@ const ( CapSourceHTTPPerm apicaps.CapID = "source.http.perm" CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid" + CapSourceOCILayout apicaps.CapID = "source.ocilayout" + CapBuildOpLLBFileName apicaps.CapID = "source.buildop.llbfilename" CapExecMetaBase apicaps.CapID = "exec.meta.base" @@ -43,6 +47,7 @@ const ( CapExecMetaSecurityDeviceWhitelistV1 apicaps.CapID = "exec.meta.security.devices.v1" CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" CapExecMetaUlimit apicaps.CapID = "exec.meta.ulimit" + CapExecMetaRemoveMountStubsRecursive apicaps.CapID = "exec.meta.removemountstubs.recursive" CapExecMountBind apicaps.CapID = "exec.mount.bind" CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" CapExecMountCache apicaps.CapID = "exec.mount.cache" @@ -67,10 +72,20 @@ const ( CapMetaDescription apicaps.CapID = "meta.description" CapMetaExportCache apicaps.CapID = "meta.exportcache" - CapRemoteCacheGHA apicaps.CapID = "cache.gha" + CapRemoteCacheGHA apicaps.CapID = "cache.gha" + CapRemoteCacheS3 apicaps.CapID = "cache.s3" + CapRemoteCacheAzBlob apicaps.CapID = "cache.azblob" CapMergeOp apicaps.CapID = "mergeop" CapDiffOp apicaps.CapID = "diffop" + + CapAnnotations apicaps.CapID = "exporter.image.annotations" + CapAttestations apicaps.CapID = "exporter.image.attestations" + + // CapSourceDateEpoch is the capability to automatically handle the date epoch + CapSourceDateEpoch apicaps.CapID = "exporter.sourcedateepoch" + + CapSourcePolicy apicaps.CapID = "source.policy" ) func init() { @@ -86,6 +101,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapSourceImageLayerLimit, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapSourceLocal, Enabled: true, @@ -194,6 +215,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapSourceOCILayout, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapSourceHTTPUIDGID, Enabled: true, @@ -383,14 +410,53 @@ func init() { Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapRemoteCacheS3, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapRemoteCacheAzBlob, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapMergeOp, Enabled: true, Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ ID: CapDiffOp, Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapAnnotations, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapAttestations, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceDateEpoch, + Name: "source date epoch", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourcePolicy, + Enabled: true, + Status: 
apicaps.CapStatusExperimental, + }) } diff --git a/vendor/github.com/moby/buildkit/solver/pb/generate.go b/vendor/github.com/moby/buildkit/solver/pb/generate.go index c31e148f2a..88adaa2702 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/generate.go +++ b/vendor/github.com/moby/buildkit/solver/pb/generate.go @@ -1,3 +1,3 @@ package pb -//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto +//go:generate protoc -I=. -I=../../vendor/ -I=../../vendor/github.com/gogo/protobuf/ --gogofaster_out=. ops.proto diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go index 252227a944..e8afea0233 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go @@ -154,6 +154,7 @@ type Op struct { // inputs is a set of input edges. Inputs []*Input `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"` // Types that are valid to be assigned to Op: + // // *Op_Exec // *Op_Source // *Op_File @@ -495,15 +496,16 @@ func (m *ExecOp) GetSecretenv() []*SecretEnv { // Meta is unrelated to LLB metadata. // FIXME: rename (ExecContext? ExecArgs?) type Meta struct { - Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` - Env []string `protobuf:"bytes,2,rep,name=env,proto3" json:"env,omitempty"` - Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` - User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` - ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` - ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` - Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` - Ulimit []*Ulimit `protobuf:"bytes,9,rep,name=ulimit,proto3" json:"ulimit,omitempty"` - CgroupParent string `protobuf:"bytes,10,opt,name=cgroupParent,proto3" json:"cgroupParent,omitempty"` + Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` + Env []string `protobuf:"bytes,2,rep,name=env,proto3" json:"env,omitempty"` + Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` + User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` + ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` + ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` + Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` + Ulimit []*Ulimit `protobuf:"bytes,9,rep,name=ulimit,proto3" json:"ulimit,omitempty"` + CgroupParent string `protobuf:"bytes,10,opt,name=cgroupParent,proto3" json:"cgroupParent,omitempty"` + RemoveMountStubsRecursive bool `protobuf:"varint,11,opt,name=removeMountStubsRecursive,proto3" json:"removeMountStubsRecursive,omitempty"` } func (m *Meta) Reset() { *m = Meta{} } @@ -598,6 +600,13 @@ func (m *Meta) GetCgroupParent() string { return "" } +func (m *Meta) GetRemoveMountStubsRecursive() bool { + if m != nil { + return m.RemoveMountStubsRecursive + } + return false +} + type HostIP struct { Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` @@ -1038,7 +1047,7 @@ func (m *SecretOpt) GetOptional() bool { return false } -// SSHOpt defines options describing secret mounts +// SSHOpt defines options describing ssh mounts type 
SSHOpt struct { // ID of exposed ssh rule. Used for quering the value. ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1586,8 +1595,8 @@ func (m *Range) GetEnd() Position { // Position is single location in a source file type Position struct { - Line int32 `protobuf:"varint,1,opt,name=Line,proto3" json:"Line,omitempty"` - Character int32 `protobuf:"varint,2,opt,name=Character,proto3" json:"Character,omitempty"` + Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"` + Character int32 `protobuf:"varint,2,opt,name=character,proto3" json:"character,omitempty"` } func (m *Position) Reset() { *m = Position{} } @@ -1948,6 +1957,7 @@ type FileAction struct { SecondaryInput InputIndex `protobuf:"varint,2,opt,name=secondaryInput,proto3,customtype=InputIndex" json:"secondaryInput"` Output OutputIndex `protobuf:"varint,3,opt,name=output,proto3,customtype=OutputIndex" json:"output"` // Types that are valid to be assigned to Action: + // // *FileAction_Copy // *FileAction_Mkfile // *FileAction_Mkdir @@ -2465,6 +2475,7 @@ func (m *ChownOpt) GetGroup() *UserOpt { type UserOpt struct { // Types that are valid to be assigned to User: + // // *UserOpt_ByName // *UserOpt_ByID User isUserOpt_User `protobuf_oneof:"user"` @@ -2831,166 +2842,168 @@ func init() { func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) } var fileDescriptor_8de16154b2733812 = []byte{ - // 2538 bytes of a gzipped FileDescriptorProto + // 2564 bytes of a gzipped FileDescriptorProto [old and new gzipped descriptor byte arrays omitted: machine-generated binary data, not human-readable]
0x75, 0xd8, 0xdb, 0xe3, 0x3d, 0xd4, 0x5b, 0x83, 0xd2, 0x70, 0xb8, 0xdf, 0x2e, 0xe2, 0xac, 0x7b, + 0x3b, 0x7b, 0xfb, 0xbd, 0x76, 0x09, 0xc9, 0xc7, 0x8f, 0x0e, 0x1f, 0x0c, 0xdb, 0xe5, 0xcd, 0x8f, + 0xe0, 0xca, 0xc2, 0x13, 0x0a, 0x8d, 0xde, 0xdf, 0xe1, 0x3d, 0xd4, 0xd4, 0x84, 0xda, 0x21, 0xef, + 0x3f, 0xdd, 0x79, 0xdc, 0x6b, 0x17, 0x50, 0xf0, 0xf9, 0x60, 0xef, 0x61, 0xaf, 0xdb, 0x2e, 0xee, + 0x5e, 0xff, 0xea, 0xc5, 0x5a, 0xe1, 0x9b, 0x17, 0x6b, 0x85, 0x6f, 0x5f, 0xac, 0x15, 0xfe, 0xf6, + 0x62, 0xad, 0xf0, 0xe5, 0xf7, 0x6b, 0x4b, 0xdf, 0x7c, 0xbf, 0xb6, 0xf4, 0xed, 0xf7, 0x6b, 0x4b, + 0x47, 0x55, 0xfa, 0xab, 0xe3, 0xc3, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x4f, 0x06, 0xaa, + 0x2a, 0x19, 0x00, 0x00, } func (m *Op) Marshal() (dAtA []byte, err error) { @@ -3377,6 +3390,16 @@ func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.RemoveMountStubsRecursive { + i-- + if m.RemoveMountStubsRecursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } if len(m.CgroupParent) > 0 { i -= len(m.CgroupParent) copy(dAtA[i:], m.CgroupParent) @@ -5718,6 +5741,9 @@ func (m *Meta) Size() (n int) { if l > 0 { n += 1 + l + sovOps(uint64(l)) } + if m.RemoveMountStubsRecursive { + n += 2 + } return n } @@ -7771,6 +7797,26 @@ func (m *Meta) Unmarshal(dAtA []byte) error { } m.CgroupParent = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoveMountStubsRecursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RemoveMountStubsRecursive = bool(v != 0) default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto index d1e30068df..87cb771902 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.proto +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.proto @@ -63,6 +63,7 @@ message Meta { string hostname = 7; repeated Ulimit ulimit = 9; string cgroupParent = 10; + bool removeMountStubsRecursive = 11; } message HostIP { @@ -157,7 +158,7 @@ message SecretOpt { bool optional = 5; } -// SSHOpt defines options describing secret mounts +// SSHOpt defines options describing ssh mounts message SSHOpt { // ID of exposed ssh rule. Used for quering the value. 
string ID = 1; @@ -243,8 +244,8 @@ message Range { // Position is single location in a source file message Position { - int32 Line = 1; - int32 Character = 2; + int32 line = 1; + int32 character = 2; } message ExportCache { diff --git a/vendor/github.com/moby/buildkit/solver/progress.go b/vendor/github.com/moby/buildkit/solver/progress.go index 6e54349671..3fb954f867 100644 --- a/vendor/github.com/moby/buildkit/solver/progress.go +++ b/vendor/github.com/moby/buildkit/solver/progress.go @@ -3,6 +3,7 @@ package solver import ( "context" "io" + "sort" "time" "github.com/moby/buildkit/util/bklog" @@ -72,6 +73,22 @@ func (j *Job) Status(ctx context.Context, ch chan *client.SolveStatus) error { ss.Warnings = append(ss.Warnings, &v) } } + sort.Slice(ss.Vertexes, func(i, j int) bool { + if ss.Vertexes[i].Started == nil { + return true + } + if ss.Vertexes[j].Started == nil { + return false + } + return ss.Vertexes[i].Started.Before(*ss.Vertexes[j].Started) + }) + sort.Slice(ss.Statuses, func(i, j int) bool { + return ss.Statuses[i].Timestamp.Before(ss.Statuses[j].Timestamp) + }) + sort.Slice(ss.Logs, func(i, j int) bool { + return ss.Logs[i].Timestamp.Before(ss.Logs[j].Timestamp) + }) + select { case <-ctx.Done(): return ctx.Err() diff --git a/vendor/github.com/moby/buildkit/solver/result.go b/vendor/github.com/moby/buildkit/solver/result.go index 81766a30f4..2ba1ef9bc1 100644 --- a/vendor/github.com/moby/buildkit/solver/result.go +++ b/vendor/github.com/moby/buildkit/solver/result.go @@ -108,3 +108,26 @@ type SharedCachedResult struct { *SharedResult CachedResult } + +type splitResultProxy struct { + released int64 + sem *int64 + ResultProxy +} + +func (r *splitResultProxy) Release(ctx context.Context) error { + if atomic.AddInt64(&r.released, 1) > 1 { + err := errors.New("releasing already released reference") + bklog.G(ctx).Error(err) + return err + } + if atomic.AddInt64(r.sem, 1) == 2 { + return r.ResultProxy.Release(ctx) + } + return nil +} + +func SplitResultProxy(res ResultProxy) (ResultProxy, ResultProxy) { + sem := int64(0) + return &splitResultProxy{ResultProxy: res, sem: &sem}, &splitResultProxy{ResultProxy: res, sem: &sem} +} diff --git a/vendor/github.com/moby/buildkit/solver/result/attestation.go b/vendor/github.com/moby/buildkit/solver/result/attestation.go new file mode 100644 index 0000000000..77af74da19 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/result/attestation.go @@ -0,0 +1,79 @@ +package result + +import ( + "reflect" + + pb "github.com/moby/buildkit/frontend/gateway/pb" + digest "github.com/opencontainers/go-digest" +) + +const ( + AttestationReasonKey = "reason" + AttestationSBOMCore = "sbom-core" + AttestationInlineOnlyKey = "inline-only" +) + +const ( + AttestationReasonSBOM = "sbom" + AttestationReasonProvenance = "provenance" +) + +type Attestation[T any] struct { + Kind pb.AttestationKind + + Metadata map[string][]byte + + Ref T + Path string + ContentFunc func() ([]byte, error) + + InToto InTotoAttestation +} + +type InTotoAttestation struct { + PredicateType string + Subjects []InTotoSubject +} + +type InTotoSubject struct { + Kind pb.InTotoSubjectKind + + Name string + Digest []digest.Digest +} + +func ToDigestMap(ds ...digest.Digest) map[string]string { + m := map[string]string{} + for _, d := range ds { + m[d.Algorithm().String()] = d.Encoded() + } + return m +} + +func FromDigestMap(m map[string]string) []digest.Digest { + var ds []digest.Digest + for k, v := range m { + ds = append(ds, digest.NewDigestFromEncoded(digest.Algorithm(k), v)) + } + 
return ds +} + +func ConvertAttestation[U any, V any](a *Attestation[U], fn func(U) (V, error)) (*Attestation[V], error) { + var ref V + if reflect.ValueOf(a.Ref).IsValid() { + var err error + ref, err = fn(a.Ref) + if err != nil { + return nil, err + } + } + + return &Attestation[V]{ + Kind: a.Kind, + Metadata: a.Metadata, + Ref: ref, + Path: a.Path, + ContentFunc: a.ContentFunc, + InToto: a.InToto, + }, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/result/result.go b/vendor/github.com/moby/buildkit/solver/result/result.go new file mode 100644 index 0000000000..d5fe2d03cf --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/result/result.go @@ -0,0 +1,180 @@ +package result + +import ( + "reflect" + "sync" + + "github.com/pkg/errors" +) + +type Result[T any] struct { + mu sync.Mutex + Ref T + Refs map[string]T + Metadata map[string][]byte + Attestations map[string][]Attestation[T] +} + +func (r *Result[T]) AddMeta(k string, v []byte) { + r.mu.Lock() + if r.Metadata == nil { + r.Metadata = map[string][]byte{} + } + r.Metadata[k] = v + r.mu.Unlock() +} + +func (r *Result[T]) AddRef(k string, ref T) { + r.mu.Lock() + if r.Refs == nil { + r.Refs = map[string]T{} + } + r.Refs[k] = ref + r.mu.Unlock() +} + +func (r *Result[T]) AddAttestation(k string, v Attestation[T]) { + r.mu.Lock() + if r.Attestations == nil { + r.Attestations = map[string][]Attestation[T]{} + } + r.Attestations[k] = append(r.Attestations[k], v) + r.mu.Unlock() +} + +func (r *Result[T]) SetRef(ref T) { + r.Ref = ref +} + +func (r *Result[T]) SingleRef() (T, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.Refs != nil && !reflect.ValueOf(r.Ref).IsValid() { + var t T + return t, errors.Errorf("invalid map result") + } + return r.Ref, nil +} + +func (r *Result[T]) FindRef(key string) (T, bool) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.Refs != nil { + if ref, ok := r.Refs[key]; ok { + return ref, true + } + if len(r.Refs) == 1 { + for _, ref := range r.Refs { + return ref, true + } + } + var t T + return t, false + } + return r.Ref, true +} + +func (r *Result[T]) EachRef(fn func(T) error) (err error) { + if reflect.ValueOf(r.Ref).IsValid() { + err = fn(r.Ref) + } + for _, r := range r.Refs { + if reflect.ValueOf(r).IsValid() { + if err1 := fn(r); err1 != nil && err == nil { + err = err1 + } + } + } + for _, as := range r.Attestations { + for _, a := range as { + if reflect.ValueOf(a.Ref).IsValid() { + if err1 := fn(a.Ref); err1 != nil && err == nil { + err = err1 + } + } + } + } + return err +} + +// EachRef iterates over references in both a and b. 
+// a and b are assumed to be of the same size and map their references +// to the same set of keys +func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err error) { + if reflect.ValueOf(a.Ref).IsValid() && reflect.ValueOf(b.Ref).IsValid() { + err = fn(a.Ref, b.Ref) + } + for k, r := range a.Refs { + r2, ok := b.Refs[k] + if !ok { + continue + } + if reflect.ValueOf(r).IsValid() && reflect.ValueOf(r2).IsValid() { + if err1 := fn(r, r2); err1 != nil && err == nil { + err = err1 + } + } + } + for k, atts := range a.Attestations { + atts2, ok := b.Attestations[k] + if !ok { + continue + } + for i, att := range atts { + if i >= len(atts2) { + break + } + att2 := atts2[i] + if reflect.ValueOf(att.Ref).IsValid() && reflect.ValueOf(att2.Ref).IsValid() { + if err1 := fn(att.Ref, att2.Ref); err1 != nil && err == nil { + err = err1 + } + } + } + } + return err +} + +func ConvertResult[U any, V any](r *Result[U], fn func(U) (V, error)) (*Result[V], error) { + r2 := &Result[V]{} + var err error + + if reflect.ValueOf(r.Ref).IsValid() { + r2.Ref, err = fn(r.Ref) + if err != nil { + return nil, err + } + } + + if r.Refs != nil { + r2.Refs = map[string]V{} + } + for k, r := range r.Refs { + if !reflect.ValueOf(r).IsValid() { + continue + } + r2.Refs[k], err = fn(r) + if err != nil { + return nil, err + } + } + + if r.Attestations != nil { + r2.Attestations = map[string][]Attestation[V]{} + } + for k, as := range r.Attestations { + for _, a := range as { + a2, err := ConvertAttestation(&a, fn) + if err != nil { + return nil, err + } + r2.Attestations[k] = append(r2.Attestations[k], *a2) + } + } + + r2.Metadata = r.Metadata + + return r2, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/scheduler.go b/vendor/github.com/moby/buildkit/solver/scheduler.go index d617cd912c..2d0ee07afe 100644 --- a/vendor/github.com/moby/buildkit/solver/scheduler.go +++ b/vendor/github.com/moby/buildkit/solver/scheduler.go @@ -222,8 +222,7 @@ func (s *scheduler) build(ctx context.Context, edge Edge) (CachedResult, error) wait := make(chan struct{}) - var p *pipe.Pipe - p = s.newPipe(e, nil, pipe.Request{Payload: &edgeRequest{desiredState: edgeStatusComplete}}) + p := s.newPipe(e, nil, pipe.Request{Payload: &edgeRequest{desiredState: edgeStatusComplete}}) p.OnSendCompletion = func() { p.Receiver.Receive() if p.Receiver.Status().Completed { diff --git a/vendor/github.com/moby/buildkit/solver/types.go b/vendor/github.com/moby/buildkit/solver/types.go index a20c1020f2..6635daef0e 100644 --- a/vendor/github.com/moby/buildkit/solver/types.go +++ b/vendor/github.com/moby/buildkit/solver/types.go @@ -72,11 +72,17 @@ type CachedResult interface { CacheKeys() []ExportableCacheKey } +type CachedResultWithProvenance interface { + CachedResult + WalkProvenance(context.Context, func(ProvenanceProvider) error) error +} + type ResultProxy interface { + ID() string Result(context.Context) (CachedResult, error) Release(context.Context) error Definition() *pb.Definition - BuildSources() BuildSources + Provenance() interface{} } // CacheExportMode is the type for setting cache exporting modes @@ -104,6 +110,8 @@ type CacheExportOpt struct { // CompressionOpt is an option to specify the compression of the object to load. // If specified, all objects that meet the option will be cached. CompressionOpt *compression.Config + // ExportRoots defines if records for root vertexes should be exported. 
+ ExportRoots bool } // CacheExporter can export the artifacts of the build chain @@ -120,7 +128,7 @@ type CacheExporterTarget interface { // CacheExporterRecord is a single object being exported type CacheExporterRecord interface { - AddResult(createdAt time.Time, result *Remote) + AddResult(vtx digest.Digest, index int, createdAt time.Time, result *Remote) LinkFrom(src CacheExporterRecord, index int, selector string) } @@ -159,6 +167,10 @@ type Op interface { Acquire(ctx context.Context) (release ReleaseFunc, err error) } +type ProvenanceProvider interface { + IsProvenanceProvider() +} + type ResultBasedCacheFunc func(context.Context, Result, session.Group) (digest.Digest, error) type PreprocessFunc func(context.Context, Result, session.Group) error @@ -196,15 +208,8 @@ type CacheMap struct { // such as oci descriptor content providers and progress writers to be passed to // the cache. Opts should not have any impact on the computed cache key. Opts CacheOpts - - // BuildSources contains build dependencies that will be set from source - // operation. - BuildSources BuildSources } -// BuildSources contains solved build dependencies. -type BuildSources map[string]string - // ExportableCacheKey is a cache key connected with an exporter that can export // a chain of cacherecords pointing to that key type ExportableCacheKey struct { diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource.go b/vendor/github.com/moby/buildkit/source/git/gitsource.go index 7b52c11330..dd35fe55f7 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "fmt" "io" - "io/ioutil" "net/url" "os" "os/exec" @@ -277,7 +276,7 @@ func (gs *gitSourceHandler) mountKnownHosts(ctx context.Context) (string, func() if gs.src.KnownSSHHosts == "" { return "", nil, errors.Errorf("no configured known hosts forwarded from the client") } - knownHosts, err := ioutil.TempFile("", "") + knownHosts, err := os.CreateTemp("", "") if err != nil { return "", nil, err } @@ -550,7 +549,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out } else { cd := checkoutDir if subdir != "." { - cd, err = ioutil.TempDir(cd, "checkout") + cd, err = os.MkdirTemp(cd, "checkout") if err != nil { return nil, errors.Wrapf(err, "failed to create temporary checkout dir") } @@ -595,7 +594,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out if idmap := mount.IdentityMapping(); idmap != nil { u := idmap.RootPair() - err := filepath.Walk(gitDir, func(p string, f os.FileInfo, err error) error { + err := filepath.WalkDir(gitDir, func(p string, _ os.DirEntry, _ error) error { return os.Lchown(p, u.UID, u.GID) }) if err != nil { diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go index 23f289c55d..cb49917573 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go @@ -28,7 +28,7 @@ func gitMain() { unix.Umask(0022) // Reexec git command - cmd := exec.Command(os.Args[1], os.Args[2:]...) + cmd := exec.Command(os.Args[1], os.Args[2:]...) 
//nolint:gosec // reexec cmd.SysProcAttr = &unix.SysProcAttr{ Setpgid: true, Pdeathsig: unix.SIGTERM, diff --git a/vendor/github.com/moby/buildkit/source/identifier.go b/vendor/github.com/moby/buildkit/source/identifier.go index 1032399e11..aad9f226ff 100644 --- a/vendor/github.com/moby/buildkit/source/identifier.go +++ b/vendor/github.com/moby/buildkit/source/identifier.go @@ -50,6 +50,8 @@ func FromString(s string) (Identifier, error) { return NewHTTPIdentifier(parts[1], true) case srctypes.HTTPScheme: return NewHTTPIdentifier(parts[1], false) + case srctypes.OCIScheme: + return NewOCIIdentifier(parts[1]) default: return nil, errors.Wrapf(errNotFound, "unknown schema %s", parts[0]) } @@ -85,6 +87,15 @@ func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) { return nil, err } id.RecordType = rt + case pb.AttrImageLayerLimit: + l, err := strconv.Atoi(v) + if err != nil { + return nil, errors.Wrapf(err, "invalid layer limit %s", v) + } + if l <= 0 { + return nil, errors.Errorf("invalid layer limit %s", v) + } + id.LayerLimit = &l } } } @@ -182,6 +193,34 @@ func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) { } } } + if id, ok := id.(*OCIIdentifier); ok { + if platform != nil { + id.Platform = &ocispecs.Platform{ + OS: platform.OS, + Architecture: platform.Architecture, + Variant: platform.Variant, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + } + } + for k, v := range op.Source.Attrs { + switch k { + case pb.AttrOCILayoutSessionID: + id.SessionID = v + case pb.AttrOCILayoutStoreID: + id.StoreID = v + case pb.AttrOCILayoutLayerLimit: + l, err := strconv.Atoi(v) + if err != nil { + return nil, errors.Wrapf(err, "invalid layer limit %s", v) + } + if l <= 0 { + return nil, errors.Errorf("invalid layer limit %s", v) + } + id.LayerLimit = &l + } + } + } return id, nil } @@ -190,6 +229,7 @@ type ImageIdentifier struct { Platform *ocispecs.Platform ResolveMode ResolveMode RecordType client.UsageRecordType + LayerLimit *int } func NewImageIdentifier(str string) (*ImageIdentifier, error) { @@ -248,6 +288,30 @@ func (*HTTPIdentifier) ID() string { return srctypes.HTTPSScheme } +type OCIIdentifier struct { + Reference reference.Spec + Platform *ocispecs.Platform + SessionID string + StoreID string + LayerLimit *int +} + +func NewOCIIdentifier(str string) (*OCIIdentifier, error) { + ref, err := reference.Parse(str) + if err != nil { + return nil, errors.WithStack(err) + } + + if ref.Object == "" { + return nil, errors.WithStack(reference.ErrObjectRequired) + } + return &OCIIdentifier{Reference: ref}, nil +} + +func (*OCIIdentifier) ID() string { + return srctypes.OCIScheme +} + func (r ResolveMode) String() string { switch r { case ResolveModeDefault: diff --git a/vendor/github.com/moby/buildkit/source/manager.go b/vendor/github.com/moby/buildkit/source/manager.go index 3f4a0cb478..6a9c831c90 100644 --- a/vendor/github.com/moby/buildkit/source/manager.go +++ b/vendor/github.com/moby/buildkit/source/manager.go @@ -16,7 +16,7 @@ type Source interface { } type SourceInstance interface { - CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) + CacheKey(ctx context.Context, g session.Group, index int) (key, pin string, opts solver.CacheOpts, done bool, err error) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) } diff --git a/vendor/github.com/moby/buildkit/source/types/types.go b/vendor/github.com/moby/buildkit/source/types/types.go index b96eac2333..ca91accf58 
100644 --- a/vendor/github.com/moby/buildkit/source/types/types.go +++ b/vendor/github.com/moby/buildkit/source/types/types.go @@ -6,4 +6,5 @@ const ( LocalScheme = "local" HTTPScheme = "http" HTTPSScheme = "https" + OCIScheme = "oci-layout" ) diff --git a/vendor/github.com/moby/buildkit/sourcepolicy/engine.go b/vendor/github.com/moby/buildkit/sourcepolicy/engine.go new file mode 100644 index 0000000000..829e851065 --- /dev/null +++ b/vendor/github.com/moby/buildkit/sourcepolicy/engine.go @@ -0,0 +1,152 @@ +package sourcepolicy + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/util/bklog" + "github.com/pkg/errors" +) + +var ( + // ErrSourceDenied is returned by the policy engine when a source is denied by the policy. + ErrSourceDenied = errors.New("source denied by policy") + + // ErrTooManyOps is returned by the policy engine when there are too many converts for a single source op. + ErrTooManyOps = errors.New("too many operations") +) + +// Engine is the source policy engine. +// It is responsible for evaluating a source policy against a source operation. +// Create one with `NewEngine`. +// +// Rule matching is delegated to the `Matcher` interface. +// Mutations are delegated to the `Mutater` interface. +type Engine struct { + pol []*spb.Policy + sources map[string]*selectorCache +} + +// NewEngine creates a new source policy engine. +func NewEngine(pol []*spb.Policy) *Engine { + return &Engine{ + pol: pol, + } +} + +// TODO: The key here can't be used to cache attr constraint regexes. +func (e *Engine) selectorCache(src *spb.Selector) *selectorCache { + if e.sources == nil { + e.sources = map[string]*selectorCache{} + } + + key := src.MatchType.String() + " " + src.Identifier + + if s, ok := e.sources[key]; ok { + return s + } + + s := &selectorCache{Selector: src} + + e.sources[key] = s + return s +} + +// Evaluate evaluates a source operation against the policy. +// +// Policies are re-evaluated for each convert rule. +// Evaluate will error if there are too many converts for a single source op, to prevent infinite loops. +// This function may error out even if the op was mutated, in which case `true` will be returned along with the error. +// +// An error is returned when the source is denied by the policy. +func (e *Engine) Evaluate(ctx context.Context, op *pb.Op) (bool, error) { + if len(e.pol) == 0 { + return false, nil + } + + var mutated bool + const maxIterr = 20 + + for i := 0; ; i++ { + if i > maxIterr { + return mutated, errors.Wrapf(ErrTooManyOps, "too many mutations on a single source") + } + + srcOp := op.GetSource() + if srcOp == nil { + return false, nil + } + if i == 0 { + ctx = bklog.WithLogger(ctx, bklog.G(ctx).WithField("orig", *srcOp).WithField("updated", op.GetSource())) + } + + mut, err := e.evaluatePolicies(ctx, srcOp) + if mut { + mutated = true + } + if err != nil { + return mutated, err + } + if !mut { + break + } + } + + return mutated, nil +} + +func (e *Engine) evaluatePolicies(ctx context.Context, srcOp *pb.SourceOp) (bool, error) { + for _, pol := range e.pol { + mut, err := e.evaluatePolicy(ctx, pol, srcOp) + if mut || err != nil { + return mut, err + } + } + return false, nil +} + +// evaluatePolicy evaluates a single policy against a source operation. +// If the source is mutated, the policy is short-circuited and `true` is returned. +// If the source is denied, an error will be returned. +// +// For Allow/Deny rules, the last matching rule wins. +// E.g.
`ALLOW foo; DENY foo` will deny `foo`, while `DENY foo; ALLOW foo` will allow `foo`. +func (e *Engine) evaluatePolicy(ctx context.Context, pol *spb.Policy, srcOp *pb.SourceOp) (bool, error) { + ident := srcOp.GetIdentifier() + + ctx = bklog.WithLogger(ctx, bklog.G(ctx).WithFields(map[string]interface{}{ + "ref": ident, + })) + + var deny bool + for _, rule := range pol.Rules { + selector := e.selectorCache(rule.Selector) + matched, err := match(ctx, selector, ident, srcOp.Attrs) + if err != nil { + return false, errors.Wrap(err, "error matching source policy") + } + if !matched { + continue + } + + switch rule.Action { + case spb.PolicyAction_ALLOW: + deny = false + case spb.PolicyAction_DENY: + deny = true + case spb.PolicyAction_CONVERT: + mut, err := mutate(ctx, srcOp, rule, selector, ident) + if err != nil || mut { + return mut, errors.Wrap(err, "error mutating source policy") + } + default: + return false, errors.Errorf("source policy: rule %s %s: unknown type %q", rule.Action, rule.Selector.Identifier, ident) + } + } + + if deny { + return false, errors.Wrapf(ErrSourceDenied, "source %q denied by policy", ident) + } + return false, nil +} diff --git a/vendor/github.com/moby/buildkit/sourcepolicy/formatter.go b/vendor/github.com/moby/buildkit/sourcepolicy/formatter.go new file mode 100644 index 0000000000..487e7a3685 --- /dev/null +++ b/vendor/github.com/moby/buildkit/sourcepolicy/formatter.go @@ -0,0 +1,92 @@ +package sourcepolicy + +import ( + "regexp" + + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/util/wildcard" + "github.com/pkg/errors" +) + +// selectorCache wraps a protobuf Selector in order to store cached state, such as the compiled regexes. +type selectorCache struct { + *spb.Selector + + re *regexp.Regexp + w *wildcardCache +} + +// Format formats the provided ref according to the match type of the source. +// +// For example, if the source is a wildcard, the ref will be formatted with the wildcard in the source replacing the parameters in the destination. +// +// matcher: wildcard source: "docker.io/library/golang:*" match: "docker.io/library/golang:1.19" format: "docker.io/library/golang:${1}-alpine" result: "docker.io/library/golang:1.19-alpine" +func (s *selectorCache) Format(match, format string) (string, error) { + switch s.MatchType { + case spb.MatchType_EXACT: + return s.Identifier, nil + case spb.MatchType_REGEX: + re, err := s.regex() + if err != nil { + return "", err + } + return re.ReplaceAllString(match, format), nil + case spb.MatchType_WILDCARD: + w, err := s.wildcard() + if err != nil { + return "", err + } + m := w.Match(match) + if m == nil { + return match, nil + } + + return m.Format(format) + } + return "", errors.Errorf("unknown match type: %s", s.MatchType) +} + +// wildcardCache wraps a wildcard.Wildcard to cache returned matches by ref. +// This way a match only needs to be computed once per ref.
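For orientation, here is a minimal end-to-end sketch of the engine and formatter above. It is illustrative only and not part of the vendored diff: it assumes the sourcepolicy and solver/pb packages exactly as introduced in this patch, the `${1}` wildcard capture and substitution behavior described in the Format doc comment, and a hypothetical mirror.internal registry. A CONVERT rule rewrites a matching image reference; the rewritten ref no longer matches the selector, so Evaluate settles after one convert pass (it re-runs the rules after every mutation, capped at 20 passes).

package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/sourcepolicy"
	spb "github.com/moby/buildkit/sourcepolicy/pb"
)

func main() {
	// CONVERT rule: rewrite docker.io/library/golang:<tag> to a mirror.
	// mirror.internal is a made-up registry, used only for illustration.
	pol := &spb.Policy{
		Rules: []*spb.Rule{{
			Action: spb.PolicyAction_CONVERT,
			Selector: &spb.Selector{
				MatchType:  spb.MatchType_WILDCARD,
				Identifier: "docker-image://docker.io/library/golang:*",
			},
			Updates: &spb.Update{
				Identifier: "docker-image://mirror.internal/golang:${1}",
			},
		}},
	}

	op := &pb.Op{Op: &pb.Op_Source{Source: &pb.SourceOp{
		Identifier: "docker-image://docker.io/library/golang:1.19",
	}}}

	mutated, err := sourcepolicy.NewEngine([]*spb.Policy{pol}).Evaluate(context.Background(), op)
	if err != nil {
		panic(err)
	}
	// The source op is mutated in place by the convert rule.
	fmt.Println(mutated, op.GetSource().Identifier)
	// Expected: true docker-image://mirror.internal/golang:1.19
}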
+type wildcardCache struct { + w *wildcard.Wildcard + m map[string]*wildcard.Match +} + +func (w *wildcardCache) Match(ref string) *wildcard.Match { + if w.m == nil { + w.m = make(map[string]*wildcard.Match) + } + + if m, ok := w.m[ref]; ok { + return m + } + + m := w.w.Match(ref) + w.m[ref] = m + return m +} + +func (s *selectorCache) wildcard() (*wildcardCache, error) { + if s.w != nil { + return s.w, nil + } + w, err := wildcard.New(s.Identifier) + if err != nil { + return nil, err + } + s.w = &wildcardCache{w: w} + return s.w, nil +} + +func (s *selectorCache) regex() (*regexp.Regexp, error) { + if s.re != nil { + return s.re, nil + } + re, err := regexp.Compile(s.Identifier) + if err != nil { + return nil, err + } + s.re = re + return re, nil +} diff --git a/vendor/github.com/moby/buildkit/sourcepolicy/matcher.go b/vendor/github.com/moby/buildkit/sourcepolicy/matcher.go new file mode 100644 index 0000000000..79ab4032a5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/sourcepolicy/matcher.go @@ -0,0 +1,58 @@ +package sourcepolicy + +import ( + "context" + "regexp" + + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/pkg/errors" +) + +func match(ctx context.Context, src *selectorCache, ref string, attrs map[string]string) (bool, error) { + for _, c := range src.Constraints { + switch c.Condition { + case spb.AttrMatch_EQUAL: + if attrs[c.Key] != c.Value { + return false, nil + } + case spb.AttrMatch_NOTEQUAL: + if attrs[c.Key] == c.Value { + return false, nil + } + case spb.AttrMatch_MATCHES: + // TODO: Cache the compiled regex + matches, err := regexp.MatchString(c.Value, attrs[c.Key]) + if err != nil { + return false, errors.Errorf("invalid regex %q: %v", c.Value, err) + } + if !matches { + return false, nil + } + default: + return false, errors.Errorf("unknown attr condition: %s", c.Condition) + } + } + + if src.Identifier == ref { + return true, nil + } + + switch src.MatchType { + case spb.MatchType_EXACT: + return false, nil + case spb.MatchType_REGEX: + re, err := src.regex() + if err != nil { + return false, err + } + return re.MatchString(ref), nil + case spb.MatchType_WILDCARD: + w, err := src.wildcard() + if err != nil { + return false, err + } + return w.Match(ref) != nil, nil + default: + return false, errors.Errorf("unknown match type: %s", src.MatchType) + } +} diff --git a/vendor/github.com/moby/buildkit/sourcepolicy/mutate.go b/vendor/github.com/moby/buildkit/sourcepolicy/mutate.go new file mode 100644 index 0000000000..7722e6dd9b --- /dev/null +++ b/vendor/github.com/moby/buildkit/sourcepolicy/mutate.go @@ -0,0 +1,50 @@ +package sourcepolicy + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" + spb "github.com/moby/buildkit/sourcepolicy/pb" + "github.com/moby/buildkit/util/bklog" + "github.com/pkg/errors" +) + +// mutate is a MutateFn which converts the source operation to the identifier and attributes provided by the policy. +// If there is no change, then the return value should be false and is not considered an error. 
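The match helper in matcher.go above first applies every attribute constraint (EQUAL, NOTEQUAL, or regex MATCHES) and only then matches the identifier, so a rule fires only when both agree. A small hedged sketch, again assuming the packages from this patch; the repository URL and the "ref" attribute value are made up for illustration. An EXACT selector plus an EQUAL constraint yields a DENY that only triggers for a git source pinned to master:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/sourcepolicy"
	spb "github.com/moby/buildkit/sourcepolicy/pb"
)

func main() {
	pol := &spb.Policy{
		Rules: []*spb.Rule{{
			Action: spb.PolicyAction_DENY,
			Selector: &spb.Selector{
				MatchType:  spb.MatchType_EXACT,
				Identifier: "git://github.com/example/repo",
				Constraints: []*spb.AttrConstraint{{
					Key:       "ref",
					Value:     "master",
					Condition: spb.AttrMatch_EQUAL,
				}},
			},
		}},
	}

	op := &pb.Op{Op: &pb.Op_Source{Source: &pb.SourceOp{
		Identifier: "git://github.com/example/repo",
		Attrs:      map[string]string{"ref": "master"},
	}}}

	_, err := sourcepolicy.NewEngine([]*spb.Policy{pol}).Evaluate(context.Background(), op)
	// Both the identifier and the attribute constraint match, so the DENY
	// rule wins and Evaluate returns an error wrapping ErrSourceDenied
	// (errors.Is should see through pkg/errors wrapping, v0.9+).
	fmt.Println(errors.Is(err, sourcepolicy.ErrSourceDenied)) // true
}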
+func mutate(ctx context.Context, op *pb.SourceOp, rule *spb.Rule, selector *selectorCache, ref string) (bool, error) { + if rule.Updates == nil { + return false, errors.Errorf("missing destination for convert rule") + } + + dest := rule.Updates.Identifier + if dest == "" { + dest = rule.Selector.Identifier + } + dest, err := selector.Format(ref, dest) + if err != nil { + return false, errors.Wrap(err, "error formatting destination") + } + + bklog.G(ctx).Debugf("sourcepolicy: converting %s to %s, pattern: %s", ref, dest, rule.Updates.Identifier) + + var mutated bool + if op.Identifier != dest && dest != "" { + mutated = true + op.Identifier = dest + } + + if rule.Updates.Attrs != nil { + if op.Attrs == nil { + op.Attrs = make(map[string]string, len(rule.Updates.Attrs)) + } + for k, v := range rule.Updates.Attrs { + if op.Attrs[k] != v { + bklog.G(ctx).Debugf("setting attr %s=%s", k, v) + op.Attrs[k] = v + mutated = true + } + } + } + + return mutated, nil +} diff --git a/vendor/github.com/moby/buildkit/sourcepolicy/pb/generate.go b/vendor/github.com/moby/buildkit/sourcepolicy/pb/generate.go new file mode 100644 index 0000000000..041c41b80e --- /dev/null +++ b/vendor/github.com/moby/buildkit/sourcepolicy/pb/generate.go @@ -0,0 +1,3 @@ +package moby_buildkit_v1_sourcepolicy //nolint:revive + +//go:generate protoc -I=. --gogofaster_out=plugins=grpc:. policy.proto diff --git a/vendor/github.com/moby/buildkit/sourcepolicy/pb/json.go b/vendor/github.com/moby/buildkit/sourcepolicy/pb/json.go new file mode 100644 index 0000000000..a9f84834e7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/sourcepolicy/pb/json.go @@ -0,0 +1,62 @@ +package moby_buildkit_v1_sourcepolicy //nolint:revive + +import ( + "github.com/gogo/protobuf/proto" + "github.com/pkg/errors" +) + +// MarshalJSON implements json.Marshaler with custom marshaling for PolicyAction. +// It gives the string form of the enum value. +func (a PolicyAction) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(PolicyAction_name, int32(a)) +} + +func (a *PolicyAction) UnmarshalJSON(data []byte) error { + val, err := proto.UnmarshalJSONEnum(PolicyAction_value, data, a.String()) + if err != nil { + return err + } + + _, ok := PolicyAction_name[val] + if !ok { + return errors.Errorf("invalid PolicyAction value: %d", val) + } + *a = PolicyAction(val) + return nil +} + +func (a AttrMatch) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(AttrMatch_name, int32(a)) +} + +func (a *AttrMatch) UnmarshalJSON(data []byte) error { + val, err := proto.UnmarshalJSONEnum(AttrMatch_value, data, a.String()) + if err != nil { + return err + } + + _, ok := AttrMatch_name[val] + if !ok { + return errors.Errorf("invalid AttrMatch value: %d", val) + } + *a = AttrMatch(val) + return nil +} + +func (a MatchType) MarshalJSON() ([]byte, error) { + return proto.MarshalJSONEnum(MatchType_name, int32(a)) +} + +func (a *MatchType) UnmarshalJSON(data []byte) error { + val, err := proto.UnmarshalJSONEnum(MatchType_value, data, a.String()) + if err != nil { + return err + } + + _, ok := MatchType_name[val] + if !ok { + return errors.Errorf("invalid MatchType value: %d", val) + } + *a = MatchType(val) + return nil +} diff --git a/vendor/github.com/moby/buildkit/sourcepolicy/pb/policy.pb.go b/vendor/github.com/moby/buildkit/sourcepolicy/pb/policy.pb.go new file mode 100644 index 0000000000..8b77afe864 --- /dev/null +++ b/vendor/github.com/moby/buildkit/sourcepolicy/pb/policy.pb.go @@ -0,0 +1,1615 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT.
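The hand-written (un)marshalers in json.go above make the policy enums round-trip through JSON as their symbolic names instead of bare integers, and reject names that are not in the enum's value table. A short hedged sketch of that behavior, assuming only the spb package as added in this patch:

package main

import (
	"encoding/json"
	"fmt"

	spb "github.com/moby/buildkit/sourcepolicy/pb"
)

func main() {
	b, _ := json.Marshal(spb.PolicyAction_CONVERT)
	fmt.Println(string(b)) // "CONVERT" (the string name, not the integer 2)

	var a spb.PolicyAction
	_ = json.Unmarshal([]byte(`"DENY"`), &a)
	fmt.Println(a == spb.PolicyAction_DENY) // true

	// Unknown names are rejected instead of silently becoming a zero value.
	err := json.Unmarshal([]byte(`"NOPE"`), &a)
	fmt.Println(err != nil) // true
}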
+// source: policy.proto + +package moby_buildkit_v1_sourcepolicy + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// PolicyAction defines the action to take when a source is matched +type PolicyAction int32 + +const ( + PolicyAction_ALLOW PolicyAction = 0 + PolicyAction_DENY PolicyAction = 1 + PolicyAction_CONVERT PolicyAction = 2 +) + +var PolicyAction_name = map[int32]string{ + 0: "ALLOW", + 1: "DENY", + 2: "CONVERT", +} + +var PolicyAction_value = map[string]int32{ + "ALLOW": 0, + "DENY": 1, + "CONVERT": 2, +} + +func (x PolicyAction) String() string { + return proto.EnumName(PolicyAction_name, int32(x)) +} + +func (PolicyAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{0} +} + +// AttrMatch defines the condition to match a source attribute +type AttrMatch int32 + +const ( + AttrMatch_EQUAL AttrMatch = 0 + AttrMatch_NOTEQUAL AttrMatch = 1 + AttrMatch_MATCHES AttrMatch = 2 +) + +var AttrMatch_name = map[int32]string{ + 0: "EQUAL", + 1: "NOTEQUAL", + 2: "MATCHES", +} + +var AttrMatch_value = map[string]int32{ + "EQUAL": 0, + "NOTEQUAL": 1, + "MATCHES": 2, +} + +func (x AttrMatch) String() string { + return proto.EnumName(AttrMatch_name, int32(x)) +} + +func (AttrMatch) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{1} +} + +// Match type is used to determine how a rule source is matched +type MatchType int32 + +const ( + // WILDCARD is the default matching type. 
+ // It may first attempt to due an exact match but will follow up with a wildcard match + // For something more powerful, use REGEX + MatchType_WILDCARD MatchType = 0 + // EXACT treats the source identifier as a litteral string match + MatchType_EXACT MatchType = 1 + // REGEX treats the source identifier as a regular expression + // With regex matching you can also use match groups to replace values in the destination identifier + MatchType_REGEX MatchType = 2 +) + +var MatchType_name = map[int32]string{ + 0: "WILDCARD", + 1: "EXACT", + 2: "REGEX", +} + +var MatchType_value = map[string]int32{ + "WILDCARD": 0, + "EXACT": 1, + "REGEX": 2, +} + +func (x MatchType) String() string { + return proto.EnumName(MatchType_name, int32(x)) +} + +func (MatchType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{2} +} + +// Rule defines the action(s) to take when a source is matched +type Rule struct { + Action PolicyAction `protobuf:"varint,1,opt,name=action,proto3,enum=moby.buildkit.v1.sourcepolicy.PolicyAction" json:"action,omitempty"` + Selector *Selector `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` + Updates *Update `protobuf:"bytes,3,opt,name=updates,proto3" json:"updates,omitempty"` +} + +func (m *Rule) Reset() { *m = Rule{} } +func (m *Rule) String() string { return proto.CompactTextString(m) } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{0} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Rule.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *Rule) GetAction() PolicyAction { + if m != nil { + return m.Action + } + return PolicyAction_ALLOW +} + +func (m *Rule) GetSelector() *Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (m *Rule) GetUpdates() *Update { + if m != nil { + return m.Updates + } + return nil +} + +// Update contains updates to the matched build step after rule is applied +type Update struct { + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Update) Reset() { *m = Update{} } +func (m *Update) String() string { return proto.CompactTextString(m) } +func (*Update) ProtoMessage() {} +func (*Update) Descriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{1} +} +func (m *Update) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Update) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Update.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Update) XXX_Merge(src proto.Message) { + xxx_messageInfo_Update.Merge(m, src) +} +func 
(m *Update) XXX_Size() int { + return m.Size() +} +func (m *Update) XXX_DiscardUnknown() { + xxx_messageInfo_Update.DiscardUnknown(m) +} + +var xxx_messageInfo_Update proto.InternalMessageInfo + +func (m *Update) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *Update) GetAttrs() map[string]string { + if m != nil { + return m.Attrs + } + return nil +} + +// Selector identifies a source to match a policy to +type Selector struct { + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // MatchType is the type of match to perform on the source identifier + MatchType MatchType `protobuf:"varint,2,opt,name=match_type,json=matchType,proto3,enum=moby.buildkit.v1.sourcepolicy.MatchType" json:"match_type,omitempty"` + Constraints []*AttrConstraint `protobuf:"bytes,3,rep,name=constraints,proto3" json:"constraints,omitempty"` +} + +func (m *Selector) Reset() { *m = Selector{} } +func (m *Selector) String() string { return proto.CompactTextString(m) } +func (*Selector) ProtoMessage() {} +func (*Selector) Descriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{2} +} +func (m *Selector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Selector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Selector.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Selector) XXX_Merge(src proto.Message) { + xxx_messageInfo_Selector.Merge(m, src) +} +func (m *Selector) XXX_Size() int { + return m.Size() +} +func (m *Selector) XXX_DiscardUnknown() { + xxx_messageInfo_Selector.DiscardUnknown(m) +} + +var xxx_messageInfo_Selector proto.InternalMessageInfo + +func (m *Selector) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *Selector) GetMatchType() MatchType { + if m != nil { + return m.MatchType + } + return MatchType_WILDCARD +} + +func (m *Selector) GetConstraints() []*AttrConstraint { + if m != nil { + return m.Constraints + } + return nil +} + +// AttrConstraint defines a constraint on a source attribute +type AttrConstraint struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Condition AttrMatch `protobuf:"varint,3,opt,name=condition,proto3,enum=moby.buildkit.v1.sourcepolicy.AttrMatch" json:"condition,omitempty"` +} + +func (m *AttrConstraint) Reset() { *m = AttrConstraint{} } +func (m *AttrConstraint) String() string { return proto.CompactTextString(m) } +func (*AttrConstraint) ProtoMessage() {} +func (*AttrConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{3} +} +func (m *AttrConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttrConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AttrConstraint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AttrConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttrConstraint.Merge(m, src) +} +func (m *AttrConstraint) XXX_Size() int { + return m.Size() +} +func (m *AttrConstraint) XXX_DiscardUnknown() { + 
xxx_messageInfo_AttrConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_AttrConstraint proto.InternalMessageInfo + +func (m *AttrConstraint) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *AttrConstraint) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *AttrConstraint) GetCondition() AttrMatch { + if m != nil { + return m.Condition + } + return AttrMatch_EQUAL +} + +// Policy is the list of rules the policy engine will perform +type Policy struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Rules []*Rule `protobuf:"bytes,2,rep,name=rules,proto3" json:"rules,omitempty"` +} + +func (m *Policy) Reset() { *m = Policy{} } +func (m *Policy) String() string { return proto.CompactTextString(m) } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_ac3b897852294d6a, []int{4} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Policy.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} + +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *Policy) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Policy) GetRules() []*Rule { + if m != nil { + return m.Rules + } + return nil +} + +func init() { + proto.RegisterEnum("moby.buildkit.v1.sourcepolicy.PolicyAction", PolicyAction_name, PolicyAction_value) + proto.RegisterEnum("moby.buildkit.v1.sourcepolicy.AttrMatch", AttrMatch_name, AttrMatch_value) + proto.RegisterEnum("moby.buildkit.v1.sourcepolicy.MatchType", MatchType_name, MatchType_value) + proto.RegisterType((*Rule)(nil), "moby.buildkit.v1.sourcepolicy.Rule") + proto.RegisterType((*Update)(nil), "moby.buildkit.v1.sourcepolicy.Update") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.sourcepolicy.Update.AttrsEntry") + proto.RegisterType((*Selector)(nil), "moby.buildkit.v1.sourcepolicy.Selector") + proto.RegisterType((*AttrConstraint)(nil), "moby.buildkit.v1.sourcepolicy.AttrConstraint") + proto.RegisterType((*Policy)(nil), "moby.buildkit.v1.sourcepolicy.Policy") +} + +func init() { proto.RegisterFile("policy.proto", fileDescriptor_ac3b897852294d6a) } + +var fileDescriptor_ac3b897852294d6a = []byte{ + // 516 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0xbd, 0x4e, 0xf3, 0xe1, 0x49, 0x14, 0x59, 0x2b, 0x0e, 0x16, 0x12, 0x56, 0x14, 0x84, + 0x88, 0x82, 0x30, 0x6d, 0xb8, 0x14, 0x2e, 0xc8, 0x38, 0x6e, 0x41, 0x4a, 0x13, 0xd8, 0xa6, 0xb4, + 0x1c, 0x10, 0x72, 0x9c, 0x45, 0x58, 0x75, 0x6c, 0xcb, 0x5e, 0x47, 0xf2, 0x8d, 0x47, 0xe0, 0x39, + 0x78, 0x0e, 0x0e, 0x1c, 0xcb, 0x8d, 0x23, 0x4a, 0x5e, 0x04, 0xed, 0x3a, 0x4e, 0xc3, 0xa5, 0xce, + 0xc9, 0x3b, 0xe3, 0xf9, 0xfd, 0xe7, 0x63, 0x67, 0xa1, 0x15, 0x85, 0xbe, 0xe7, 0x66, 0x46, 0x14, + 0x87, 0x2c, 0xc4, 0x0f, 0x16, 0xe1, 0x2c, 0x33, 0x66, 0xa9, 0xe7, 0xcf, 0xaf, 0x3d, 0x66, 0x2c, + 0x8f, 0x8c, 0x24, 0x4c, 0x63, 0x97, 0xe6, 0x41, 0xdd, 0xdf, 
0x08, 0x0e, 0x48, 0xea, 0x53, 0x6c, + 0x41, 0xcd, 0x71, 0x99, 0x17, 0x06, 0x1a, 0xea, 0xa0, 0x5e, 0x7b, 0xf0, 0xc4, 0xb8, 0x13, 0x34, + 0xde, 0x89, 0x8f, 0x29, 0x10, 0xb2, 0x41, 0xb1, 0x05, 0x8d, 0x84, 0xfa, 0xd4, 0x65, 0x61, 0xac, + 0xc9, 0x1d, 0xd4, 0x6b, 0x0e, 0x1e, 0x97, 0xc8, 0x9c, 0x6f, 0xc2, 0xc9, 0x16, 0xc4, 0xaf, 0xa0, + 0x9e, 0x46, 0x73, 0x87, 0xd1, 0x44, 0xab, 0x08, 0x8d, 0x47, 0x25, 0x1a, 0x17, 0x22, 0x9a, 0x14, + 0x54, 0xf7, 0x07, 0x82, 0x5a, 0xee, 0xc3, 0x3a, 0x80, 0x37, 0xa7, 0x01, 0xf3, 0xbe, 0x78, 0x34, + 0x16, 0x9d, 0x29, 0x64, 0xc7, 0x83, 0x4f, 0xa0, 0xea, 0x30, 0x16, 0x27, 0x9a, 0xdc, 0xa9, 0xf4, + 0x9a, 0x83, 0xc3, 0xbd, 0x32, 0x19, 0x26, 0x47, 0xec, 0x80, 0xc5, 0x19, 0xc9, 0xf1, 0xfb, 0xc7, + 0x00, 0xb7, 0x4e, 0xac, 0x42, 0xe5, 0x9a, 0x66, 0x9b, 0x74, 0xfc, 0x88, 0xef, 0x41, 0x75, 0xe9, + 0xf8, 0x29, 0x15, 0x53, 0x51, 0x48, 0x6e, 0xbc, 0x94, 0x8f, 0x51, 0xf7, 0x27, 0x82, 0x46, 0x31, + 0x84, 0xd2, 0x72, 0x4f, 0x01, 0x16, 0x0e, 0x73, 0xbf, 0x7e, 0x66, 0x59, 0x94, 0x6b, 0xb5, 0x07, + 0xbd, 0x92, 0x9a, 0xcf, 0x38, 0x30, 0xcd, 0x22, 0x4a, 0x94, 0x45, 0x71, 0xc4, 0x13, 0x68, 0xba, + 0x61, 0x90, 0xb0, 0xd8, 0xf1, 0x02, 0xc6, 0xe7, 0xcc, 0xbb, 0x7f, 0x5a, 0xa2, 0xc4, 0x3b, 0xb4, + 0xb6, 0x14, 0xd9, 0x55, 0xe8, 0x7e, 0x43, 0xd0, 0xfe, 0xff, 0xff, 0xbe, 0x53, 0xc0, 0x27, 0xa0, + 0xb8, 0x61, 0x30, 0xf7, 0xc4, 0xf2, 0x55, 0xf6, 0xea, 0x89, 0x67, 0x12, 0x7d, 0x91, 0x5b, 0xb4, + 0xfb, 0x09, 0x6a, 0xf9, 0x52, 0x62, 0x0d, 0xea, 0x4b, 0x1a, 0x27, 0xc5, 0x32, 0x57, 0x48, 0x61, + 0xe2, 0x17, 0x50, 0x8d, 0x53, 0x9f, 0x16, 0xf7, 0xfd, 0xb0, 0x24, 0x0f, 0x7f, 0x19, 0x24, 0x27, + 0xfa, 0x87, 0xd0, 0xda, 0xdd, 0x79, 0xac, 0x40, 0xd5, 0x1c, 0x8d, 0x26, 0x97, 0xaa, 0x84, 0x1b, + 0x70, 0x30, 0xb4, 0xc7, 0x1f, 0x55, 0x84, 0x9b, 0x50, 0xb7, 0x26, 0xe3, 0x0f, 0x36, 0x99, 0xaa, + 0x72, 0xff, 0x08, 0x94, 0x6d, 0xa1, 0x3c, 0xdc, 0x7e, 0x7f, 0x61, 0x8e, 0x54, 0x09, 0xb7, 0xa0, + 0x31, 0x9e, 0x4c, 0x73, 0x4b, 0x20, 0x67, 0xe6, 0xd4, 0x7a, 0x63, 0x9f, 0xab, 0x72, 0xff, 0x19, + 0x28, 0xdb, 0xfb, 0xe2, 0x71, 0x97, 0x6f, 0x47, 0x43, 0xcb, 0x24, 0x43, 0x55, 0x12, 0x02, 0x57, + 0xa6, 0x35, 0x55, 0x11, 0x3f, 0x12, 0xfb, 0xd4, 0xbe, 0x52, 0xe5, 0xd7, 0xda, 0xaf, 0x95, 0x8e, + 0x6e, 0x56, 0x3a, 0xfa, 0xbb, 0xd2, 0xd1, 0xf7, 0xb5, 0x2e, 0xdd, 0xac, 0x75, 0xe9, 0xcf, 0x5a, + 0x97, 0x66, 0x35, 0xf1, 0xfe, 0x9f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xae, 0x7a, 0xeb, 0x6c, + 0x0f, 0x04, 0x00, 0x00, +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Updates != nil { + { + size, err := m.Updates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPolicy(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPolicy(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Action != 0 { + i = encodeVarintPolicy(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Update) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Update) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Update) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attrs) > 0 { + for k := range m.Attrs { + v := m.Attrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintPolicy(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintPolicy(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintPolicy(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintPolicy(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Selector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Selector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Selector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Constraints) > 0 { + for iNdEx := len(m.Constraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Constraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPolicy(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.MatchType != 0 { + i = encodeVarintPolicy(dAtA, i, uint64(m.MatchType)) + i-- + dAtA[i] = 0x10 + } + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintPolicy(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AttrConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttrConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttrConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Condition != 0 { + i = encodeVarintPolicy(dAtA, i, uint64(m.Condition)) + i-- + dAtA[i] = 0x18 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintPolicy(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintPolicy(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Policy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = encodeVarintPolicy(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Version != 0 { + i = encodeVarintPolicy(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintPolicy(dAtA []byte, offset int, v uint64) int { + offset -= sovPolicy(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Action != 0 { + n += 1 + sovPolicy(uint64(m.Action)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovPolicy(uint64(l)) + } + if m.Updates != nil { + l = m.Updates.Size() + n += 1 + l + sovPolicy(uint64(l)) + } + return n +} + +func (m *Update) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovPolicy(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovPolicy(uint64(len(k))) + 1 + len(v) + sovPolicy(uint64(len(v))) + n += mapEntrySize + 1 + sovPolicy(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Selector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovPolicy(uint64(l)) + } + if m.MatchType != 0 { + n += 1 + sovPolicy(uint64(m.MatchType)) + } + if len(m.Constraints) > 0 { + for _, e := range m.Constraints { + l = e.Size() + n += 1 + l + sovPolicy(uint64(l)) + } + } + return n +} + +func (m *AttrConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovPolicy(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovPolicy(uint64(l)) + } + if m.Condition != 0 { + n += 1 + sovPolicy(uint64(m.Condition)) + } + return n +} + +func (m *Policy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Version != 0 { + n += 1 + sovPolicy(uint64(m.Version)) + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovPolicy(uint64(l)) + } + } + return n +} + +func sovPolicy(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPolicy(x uint64) (n int) { + return sovPolicy(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= PolicyAction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &Selector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Updates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Updates == nil { + m.Updates = &Update{} + } + if err := m.Updates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Update) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Update: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Update: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attrs == nil { + 
m.Attrs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthPolicy + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthPolicy + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthPolicy + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthPolicy + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Selector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Selector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Selector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchType", wireType) + } + m.MatchType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MatchType |= MatchType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, &AttrConstraint{}) + if err := m.Constraints[len(m.Constraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttrConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttrConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttrConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Condition", wireType) + } + m.Condition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Condition |= AttrMatch(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Policy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Policy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPolicy + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPolicy + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPolicy + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, &Rule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPolicy(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPolicy + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPolicy(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPolicy + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowPolicy + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPolicy + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPolicy + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPolicy + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPolicy + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPolicy = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPolicy = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPolicy = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/moby/buildkit/sourcepolicy/pb/policy.proto b/vendor/github.com/moby/buildkit/sourcepolicy/pb/policy.proto new file mode 100644 index 0000000000..f46aca063f --- /dev/null +++ b/vendor/github.com/moby/buildkit/sourcepolicy/pb/policy.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package moby.buildkit.v1.sourcepolicy; + +// Rule defines the action(s) to take when a source is matched +message Rule { + PolicyAction action = 1; + Selector selector = 2; + Update updates = 3; +} + +// Update contains updates to the matched build step after rule is applied +message Update { + string identifier = 1; + map<string, string> attrs = 2; +} + +// Selector identifies a source to match a policy to +message Selector { + string identifier = 1; + // MatchType is the type of match to perform on the source identifier + MatchType match_type = 2; + repeated AttrConstraint constraints = 3; +} + +// PolicyAction defines the action to take when a source is matched +enum PolicyAction { + ALLOW = 0; + DENY = 1; + CONVERT = 2; +} + +// AttrConstraint defines a constraint on a source attribute +message AttrConstraint { + string key = 1; + string value = 2; + AttrMatch condition = 3; +} + +// AttrMatch defines the condition to match a source attribute +enum AttrMatch { + EQUAL = 0; + NOTEQUAL = 1; + MATCHES = 2; +} + +// Policy is the list of rules the policy engine will perform +message Policy { + int64 version = 1; // Currently 1 + repeated Rule rules = 2; +} + +// Match type is used to determine how a rule source is matched +enum MatchType { + // WILDCARD is the default matching type.
+ // It may first attempt to do an exact match but will follow up with a wildcard match + // For something more powerful, use REGEX + WILDCARD = 0; + // EXACT treats the source identifier as a literal string match + EXACT = 1; + // REGEX treats the source identifier as a regular expression + // With regex matching you can also use match groups to replace values in the destination identifier + REGEX = 2; +} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go index 499e877184..0084280c28 100644 --- a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go @@ -10,9 +10,11 @@ import ( ) const ( - Address = "unix:///run/buildkit/buildkitd.sock" - Root = "/var/lib/buildkit" - ConfigDir = "/etc/buildkit" + Address = "unix:///run/buildkit/buildkitd.sock" + Root = "/var/lib/buildkit" + ConfigDir = "/etc/buildkit" + DefaultCNIBinDir = "/opt/cni/bin" + DefaultCNIConfigPath = "/etc/buildkit/cni.json" ) // UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go index d5d0ca1fb9..058789e48a 100644 --- a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go @@ -10,8 +10,10 @@ const ( ) var ( - Root = filepath.Join(os.Getenv("ProgramData"), "buildkitd", ".buildstate") - ConfigDir = filepath.Join(os.Getenv("ProgramData"), "buildkitd") + Root = filepath.Join(os.Getenv("ProgramData"), "buildkitd", ".buildstate") + ConfigDir = filepath.Join(os.Getenv("ProgramData"), "buildkitd") + DefaultCNIBinDir = filepath.Join(ConfigDir, "bin") + DefaultCNIConfigPath = filepath.Join(ConfigDir, "cni.json") ) func UserAddress() string { diff --git a/vendor/github.com/moby/buildkit/util/archutil/Dockerfile b/vendor/github.com/moby/buildkit/util/archutil/Dockerfile index 6ac641f06d..9f8e59d9db 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/Dockerfile +++ b/vendor/github.com/moby/buildkit/util/archutil/Dockerfile @@ -36,6 +36,10 @@ FROM base AS exit-s390x COPY fixtures/exit.s390x.s . RUN s390x-linux-gnu-as --noexecstack -o exit.o exit.s390x.s && s390x-linux-gnu-ld -o exit -s exit.o +FROM base AS exit-ppc64 +COPY fixtures/exit.ppc64.s . +RUN powerpc64le-linux-gnu-as -mbig --noexecstack -o exit.o exit.ppc64.s && powerpc64le-linux-gnu-ld -EB -o exit -s exit.o + FROM base AS exit-ppc64le COPY fixtures/exit.ppc64le.s . RUN powerpc64le-linux-gnu-as --noexecstack -o exit.o exit.ppc64le.s && powerpc64le-linux-gnu-ld -o exit -s exit.o @@ -48,7 +52,7 @@ FROM base AS exit-mips64 COPY fixtures/exit.mips64.s . RUN mips64-linux-gnuabi64-as --noexecstack -o exit.o exit.mips64.s && mips64-linux-gnuabi64-ld -o exit -s exit.o -FROM golang:1.17-alpine AS generate +FROM golang:1.19-alpine AS generate WORKDIR /src COPY --from=exit-amd64 /src/exit amd64 COPY --from=exit-386 /src/exit 386 @@ -56,12 +60,13 @@ COPY --from=exit-arm64 /src/exit arm64 COPY --from=exit-arm /src/exit arm COPY --from=exit-riscv64 /src/exit riscv64 COPY --from=exit-s390x /src/exit s390x +COPY --from=exit-ppc64 /src/exit ppc64 COPY --from=exit-ppc64le /src/exit ppc64le COPY --from=exit-mips64le /src/exit mips64le COPY --from=exit-mips64 /src/exit mips64 COPY generate.go .
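For reference, the generated sourcepolicy types above are enough to build and serialize a policy by hand. A minimal sketch follows; the PolicyAction_CONVERT and MatchType_EXACT constant names assume gogo's default enum naming, and the digest is a placeholder, not a real pin:

package main

import (
	"fmt"

	spb "github.com/moby/buildkit/sourcepolicy/pb"
)

func main() {
	// One CONVERT rule that pins an exact image reference to a digest.
	policy := &spb.Policy{
		Version: 1, // currently the only version, per policy.proto
		Rules: []*spb.Rule{{
			Action: spb.PolicyAction_CONVERT,
			Selector: &spb.Selector{
				MatchType:  spb.MatchType_EXACT,
				Identifier: "docker-image://docker.io/library/alpine:latest",
			},
			Updates: &spb.Update{
				// Placeholder digest, for illustration only.
				Identifier: "docker-image://docker.io/library/alpine:latest@sha256:<digest>",
			},
		}},
	}
	dt, err := policy.Marshal() // wire format via the handwritten marshalers above
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes\n", len(dt))
}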
-RUN go run generate.go amd64 386 arm64 arm riscv64 s390x ppc64le mips64le mips64 && ls -l +RUN go run generate.go amd64 386 arm64 arm riscv64 s390x ppc64 ppc64le mips64le mips64 && ls -l FROM scratch diff --git a/vendor/github.com/moby/buildkit/util/archutil/check_unix.go b/vendor/github.com/moby/buildkit/util/archutil/check_unix.go index 8b558a3176..91be4d8026 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/check_unix.go +++ b/vendor/github.com/moby/buildkit/util/archutil/check_unix.go @@ -7,7 +7,6 @@ import ( "bytes" "compress/gzip" "io" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -23,7 +22,7 @@ func withChroot(cmd *exec.Cmd, dir string) { } func check(arch, bin string) (string, error) { - tmpdir, err := ioutil.TempDir("", "qemu-check") + tmpdir, err := os.MkdirTemp("", "qemu-check") if err != nil { return "", err } @@ -41,6 +40,7 @@ func check(arch, bin string) (string, error) { return "", err } + //nolint:gosec // inputs should be static strings if _, err := io.Copy(f, r); err != nil { f.Close() return "", err diff --git a/vendor/github.com/moby/buildkit/util/archutil/detect.go b/vendor/github.com/moby/buildkit/util/archutil/detect.go index 44cb3133e1..3184f9e548 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/detect.go +++ b/vendor/github.com/moby/buildkit/util/archutil/detect.go @@ -48,6 +48,11 @@ func SupportedPlatforms(noCache bool) []ocispecs.Platform { arr = append(arr, linux(p)) } } + if p := "ppc64"; def.Architecture != p { + if _, err := ppc64Supported(); err == nil { + arr = append(arr, linux(p)) + } + } if p := "ppc64le"; def.Architecture != p { if _, err := ppc64leSupported(); err == nil { arr = append(arr, linux(p)) @@ -87,9 +92,9 @@ func SupportedPlatforms(noCache bool) []ocispecs.Platform { return arr } -//WarnIfUnsupported validates the platforms and show warning message if there is, -//the end user could fix the issue based on those warning, and thus no need to drop -//the platform from the candidates. +// WarnIfUnsupported validates the platforms and show warning message if there is, +// the end user could fix the issue based on those warning, and thus no need to drop +// the platform from the candidates. func WarnIfUnsupported(pfs []ocispecs.Platform) { def := nativePlatform() for _, p := range pfs { @@ -109,6 +114,11 @@ func WarnIfUnsupported(pfs []ocispecs.Platform) { printPlatformWarning(p, err) } } + if p.Architecture == "ppc64" { + if _, err := ppc64Supported(); err != nil { + printPlatformWarning(p, err) + } + } if p.Architecture == "ppc64le" { if _, err := ppc64leSupported(); err != nil { printPlatformWarning(p, err) diff --git a/vendor/github.com/moby/buildkit/util/archutil/ppc64_binary.go b/vendor/github.com/moby/buildkit/util/archutil/ppc64_binary.go new file mode 100644 index 0000000000..d0c197c20d --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/archutil/ppc64_binary.go @@ -0,0 +1,9 @@ +//go:build !ppc64 +// +build !ppc64 + +package archutil + +// This file is generated by running make inside the archutil package. +// Do not edit manually. 
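The Binaryppc64 constant below is a gzip-compressed static binary that simply exits 0. On non-ppc64 hosts, ppc64Supported passes it to check(), which unpacks and executes it, so success means the kernel (typically binfmt_misc plus QEMU) can run ppc64 ELF binaries. A rough standalone sketch of that probe, with the chroot handling omitted and error handling collapsed to a boolean:

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"os/exec"
	"strings"

	"github.com/moby/buildkit/util/archutil"
)

// canRun reports whether the host can execute the embedded probe binary.
func canRun(blob string) bool {
	r, err := gzip.NewReader(strings.NewReader(blob))
	if err != nil {
		return false
	}
	f, err := os.CreateTemp("", "exit-probe")
	if err != nil {
		return false
	}
	defer os.Remove(f.Name())
	if _, err := io.Copy(f, r); err != nil {
		f.Close()
		return false
	}
	f.Close()
	if err := os.Chmod(f.Name(), 0o755); err != nil {
		return false
	}
	return exec.Command(f.Name()).Run() == nil
}

func main() {
	fmt.Println(canRun(archutil.Binaryppc64))
}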
+ +const Binaryppc64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\xd0\xb1\x8a\x13\x51\x14\x06\xe0\xff\x8e\xd9\x45\xd0\x62\x2c\x84\x05\x9b\x3c\x40\x98\x7a\xcb\x14\x6a\x65\xa3\x2f\xa0\x2b\x89\x6c\x23\xca\xee\x14\x76\xfb\xb4\x81\xbc\x45\x24\x93\xc9\x64\x12\x89\xa4\xb0\x92\xef\x83\xdc\x73\x72\x66\x7e\xce\x65\x9e\xde\x7d\x78\x5f\x55\x25\x83\x2a\xaf\x93\x74\x83\xba\x6c\xd6\xfd\x74\xde\x9d\x25\xd3\xee\x9c\xe7\x36\x93\xcc\x73\x95\x49\xff\xee\x55\x46\xea\x93\x9a\x94\xd9\x51\x2d\xc3\x79\xbd\x9b\xef\xf6\xec\xf7\x8d\xf6\xde\x1c\xd5\x92\x2c\xda\xd5\xc7\x43\xee\x62\xf5\xa2\x5d\x7d\x4a\xba\xfb\x5e\xbe\x2f\x29\xb7\xdb\xdf\x97\xe4\xed\xf6\xcb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfc\x27\xea\x94\x69\x57\xab\xa7\xc3\xb0\x79\xbc\x7f\x6c\x1f\xda\xbb\xaf\x69\xda\xe5\xaf\x36\xcd\xf2\xfe\xf3\xb7\x87\xbb\xef\xcb\x34\x3f\x7e\x2e\xfe\xc5\xda\x17\x49\x4a\xdf\x5f\x8f\xef\x91\xa1\xe6\xe5\x49\xe6\xf9\xa8\x7f\x35\xca\x57\x7d\x7e\xd6\xe7\x67\x67\x76\x4e\x46\xfd\x9b\x51\xfe\x59\x97\x2f\x9b\xf5\xee\xef\xbe\xe6\xe6\x2f\xfb\xcb\x3e\xf7\x87\x32\x74\xd3\x73\x4f\x7e\x07\x00\x00\xff\xff\x5e\xe4\x1d\xbd\x60\x01\x01\x00" diff --git a/vendor/github.com/moby/buildkit/util/archutil/ppc64_check.go b/vendor/github.com/moby/buildkit/util/archutil/ppc64_check.go new file mode 100644 index 0000000000..00fe3e16ff --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/archutil/ppc64_check.go @@ -0,0 +1,8 @@ +//go:build !ppc64 +// +build !ppc64 + +package archutil + +func ppc64Supported() (string, error) { + return check("ppc64", Binaryppc64) +} diff --git a/vendor/github.com/moby/buildkit/util/archutil/ppc64_check_ppc64.go b/vendor/github.com/moby/buildkit/util/archutil/ppc64_check_ppc64.go new file mode 100644 index 0000000000..82e6958454 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/archutil/ppc64_check_ppc64.go @@ -0,0 +1,8 @@ +//go:build ppc64 +// +build ppc64 + +package archutil + +func ppc64Supported() (string, error) { + return "", nil +} diff --git a/vendor/github.com/moby/buildkit/util/attestation/types.go b/vendor/github.com/moby/buildkit/util/attestation/types.go new file mode 100644 index 0000000000..35f4404cd6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/attestation/types.go @@ -0,0 +1,11 @@ +package attestation + +const ( + MediaTypeDockerSchema2AttestationType = "application/vnd.in-toto+json" + + DockerAnnotationReferenceType = "vnd.docker.reference.type" + DockerAnnotationReferenceDigest = "vnd.docker.reference.digest" + DockerAnnotationReferenceDescription = "vnd.docker.reference.description" + + DockerAnnotationReferenceTypeDefault = "attestation-manifest" +) diff --git a/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go b/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go index 9771d9d348..64b9ea48e1 100644 --- a/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go +++ b/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go @@ -10,12 +10,70 @@ import ( ctnref "github.com/containerd/containerd/reference" "github.com/docker/distribution/reference" "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/solver/llbsolver/provenance" "github.com/moby/buildkit/source" binfotypes "github.com/moby/buildkit/util/buildinfo/types" "github.com/moby/buildkit/util/urlutil" "github.com/pkg/errors" ) +// BuildInfo format has been 
deprecated and will be removed in a future release. +// Use provenance attestations instead. + +func FromProvenance(c *provenance.Capture) (*binfotypes.BuildInfo, error) { + var bi binfotypes.BuildInfo + + bi.Frontend = c.Frontend + bi.Attrs = map[string]*string{} + for k, v := range c.Args { + v := v + bi.Attrs[k] = &v + } + + for _, s := range c.Sources.Images { + bi.Sources = append(bi.Sources, binfotypes.Source{ + Type: binfotypes.SourceTypeDockerImage, + Ref: s.Ref, + Pin: s.Digest.String(), + }) + } + + for _, s := range c.Sources.HTTP { + bi.Sources = append(bi.Sources, binfotypes.Source{ + Type: binfotypes.SourceTypeHTTP, + Ref: s.URL, + Pin: s.Digest.String(), + }) + } + + for _, s := range c.Sources.Git { + bi.Sources = append(bi.Sources, binfotypes.Source{ + Type: binfotypes.SourceTypeGit, + Ref: s.URL, + Pin: s.Commit, + }) + } + + sort.Slice(bi.Sources, func(i, j int) bool { + return bi.Sources[i].Ref < bi.Sources[j].Ref + }) + + return &bi, nil +} + +func AddMetadata(metadata map[string][]byte, key string, c *provenance.Capture) error { + bi, err := FromProvenance(c) + if err != nil { + return err + } + dt, err := json.Marshal(bi) + if err != nil { + return err + } + metadata[key] = dt + return nil +} + // Decode decodes a base64 encoded build info. func Decode(enc string) (bi binfotypes.BuildInfo, _ error) { dec, err := base64.StdEncoding.DecodeString(enc) @@ -323,7 +381,7 @@ func filterAttrs(key string, attrs map[string]*string) map[string]*string { continue } // always include - if strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") { + if strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "vcs:") { filtered[k] = v continue } @@ -377,49 +435,6 @@ func isControlArg(attrKey string) bool { return false } -// GetMetadata returns buildinfo metadata for the specified key. If the key -// is already there, result will be merged. 
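With FromProvenance and AddMetadata in place, exporters derive the legacy BuildInfo blob from a provenance capture instead of going through the GetMetadata path removed below. A small sketch of the new flow; the metadata key string here is chosen for illustration, callers pass their own exporter key:

package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/llbsolver/provenance"
	"github.com/moby/buildkit/util/buildinfo"
)

func main() {
	c := &provenance.Capture{} // normally produced by the LLB solver
	metadata := map[string][]byte{}
	if err := buildinfo.AddMetadata(metadata, "containerimage.buildinfo", c); err != nil {
		panic(err)
	}
	fmt.Println(string(metadata["containerimage.buildinfo"])) // JSON-encoded BuildInfo
}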
-func GetMetadata(metadata map[string][]byte, key string, reqFrontend string, reqAttrs map[string]string) ([]byte, error) { - if metadata == nil { - metadata = make(map[string][]byte) - } - var dtbi []byte - if v, ok := metadata[key]; ok && v != nil { - var mbi binfotypes.BuildInfo - if errm := json.Unmarshal(v, &mbi); errm != nil { - return nil, errors.Wrapf(errm, "failed to unmarshal build info for %q", key) - } - if reqFrontend != "" { - mbi.Frontend = reqFrontend - } - if deps, err := decodeDeps(key, convertMap(reduceMapString(reqAttrs, mbi.Attrs))); err == nil { - mbi.Deps = reduceMapBuildInfo(deps, mbi.Deps) - } else { - return nil, err - } - mbi.Attrs = filterAttrs(key, convertMap(reduceMapString(reqAttrs, mbi.Attrs))) - var err error - dtbi, err = json.Marshal(mbi) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal build info for %q", key) - } - } else { - deps, err := decodeDeps(key, convertMap(reqAttrs)) - if err != nil { - return nil, err - } - dtbi, err = json.Marshal(binfotypes.BuildInfo{ - Frontend: reqFrontend, - Attrs: filterAttrs(key, convertMap(reqAttrs)), - Deps: deps, - }) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal build info for %q", key) - } - } - return dtbi, nil -} - func reduceMapString(m1 map[string]string, m2 map[string]*string) map[string]string { if m1 == nil && m2 == nil { return nil @@ -434,25 +449,3 @@ func reduceMapString(m1 map[string]string, m2 map[string]*string) map[string]str } return m1 } - -func reduceMapBuildInfo(m1 map[string]binfotypes.BuildInfo, m2 map[string]binfotypes.BuildInfo) map[string]binfotypes.BuildInfo { - if m1 == nil && m2 == nil { - return nil - } - if m1 == nil { - m1 = map[string]binfotypes.BuildInfo{} - } - for k, v := range m2 { - m1[k] = v - } - return m1 -} - -func convertMap(m map[string]string) map[string]*string { - res := make(map[string]*string) - for k, v := range m { - value := v - res[k] = &value - } - return res -} diff --git a/vendor/github.com/moby/buildkit/util/compression/compression.go b/vendor/github.com/moby/buildkit/util/compression/compression.go index ba44a9270b..cfc26b9078 100644 --- a/vendor/github.com/moby/buildkit/util/compression/compression.go +++ b/vendor/github.com/moby/buildkit/util/compression/compression.go @@ -5,33 +5,53 @@ import ( "context" "io" + cdcompression "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" "github.com/containerd/stargz-snapshotter/estargz" + "github.com/moby/buildkit/util/iohelper" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// Type represents compression type for blob data. -type Type int +type Compressor func(dest io.Writer, mediaType string) (io.WriteCloser, error) +type Decompressor func(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) +type Finalizer func(context.Context, content.Store) (map[string]string, error) -const ( +// Type represents compression type for blob data, which needs +// to be implemented for each compression type. 
+type Type interface { + Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) + Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) + NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) + NeedsComputeDiffBySelf() bool + OnlySupportOCITypes() bool + NeedsForceCompression() bool + MediaType() string + String() string +} + +type ( + uncompressedType struct{} + gzipType struct{} + estargzType struct{} + zstdType struct{} +) + +var ( // Uncompressed indicates no compression. - Uncompressed Type = iota + Uncompressed = uncompressedType{} // Gzip is used for blob data. - Gzip + Gzip = gzipType{} // EStargz is used for estargz data. - EStargz + EStargz = estargzType{} // Zstd is used for Zstandard data. - Zstd - - // UnknownCompression means not supported yet. - UnknownCompression Type = -1 + Zstd = zstdType{} ) type Config struct { @@ -61,70 +81,42 @@ const ( mediaTypeImageLayerZstd = ocispecs.MediaTypeImageLayer + "+zstd" // unreleased image-spec#790 ) -var Default = Gzip +var Default gzipType = Gzip -func Parse(t string) Type { +func parse(t string) (Type, error) { switch t { - case "uncompressed": - return Uncompressed - case "gzip": - return Gzip - case "estargz": - return EStargz - case "zstd": - return Zstd + case Uncompressed.String(): + return Uncompressed, nil + case Gzip.String(): + return Gzip, nil + case EStargz.String(): + return EStargz, nil + case Zstd.String(): + return Zstd, nil default: - return UnknownCompression + return nil, errors.Errorf("unsupported compression type %s", t) } } -func (ct Type) String() string { - switch ct { - case Uncompressed: - return "uncompressed" - case Gzip: - return "gzip" - case EStargz: - return "estargz" - case Zstd: - return "zstd" +func fromMediaType(mediaType string) (Type, error) { + switch toOCILayerType[mediaType] { + case ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerNonDistributable: + return Uncompressed, nil + case ocispecs.MediaTypeImageLayerGzip, ocispecs.MediaTypeImageLayerNonDistributableGzip: + return Gzip, nil + case mediaTypeImageLayerZstd, ocispecs.MediaTypeImageLayerNonDistributableZstd: + return Zstd, nil default: - return "unknown" + return nil, errors.Errorf("unsupported media type %s", mediaType) } } -func (ct Type) DefaultMediaType() string { - switch ct { - case Uncompressed: - return ocispecs.MediaTypeImageLayer - case Gzip, EStargz: - return ocispecs.MediaTypeImageLayerGzip - case Zstd: - return mediaTypeImageLayerZstd - default: - return ocispecs.MediaTypeImageLayer + "+unknown" - } -} - -func (ct Type) IsMediaType(mt string) bool { +func IsMediaType(ct Type, mt string) bool { mt, ok := toOCILayerType[mt] if !ok { return false } - return mt == ct.DefaultMediaType() -} - -func FromMediaType(mediaType string) Type { - switch toOCILayerType[mediaType] { - case ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerNonDistributable: - return Uncompressed - case ocispecs.MediaTypeImageLayerGzip, ocispecs.MediaTypeImageLayerNonDistributableGzip: - return Gzip - case mediaTypeImageLayerZstd, ocispecs.MediaTypeImageLayerNonDistributableZstd: - return Zstd - default: - return UnknownCompression - } + return mt == ct.MediaType() } // DetectLayerMediaType returns media type from existing blob data. @@ -170,7 +162,7 @@ func detectCompressionType(cr *io.SectionReader) (Type, error) { // means just create an empty layer. 
// // See issue docker/docker#18170 - return UnknownCompression, err + return nil, err } if _, _, err := estargz.OpenFooter(cr); err == nil { @@ -241,3 +233,25 @@ func ConvertAllLayerMediaTypes(oci bool, descs ...ocispecs.Descriptor) []ocispec } return converted } + +func decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (r io.ReadCloser, err error) { + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + esgz, err := EStargz.Is(ctx, cs, desc.Digest) + if err != nil { + return nil, err + } else if esgz { + r, err = decompressEStargz(io.NewSectionReader(ra, 0, ra.Size())) + if err != nil { + return nil, err + } + } else { + r, err = cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size())) + if err != nil { + return nil, err + } + } + return &iohelper.ReadCloser{ReadCloser: r, CloseFunc: ra.Close}, nil +} diff --git a/vendor/github.com/moby/buildkit/cache/estargz.go b/vendor/github.com/moby/buildkit/util/compression/estargz.go similarity index 76% rename from vendor/github.com/moby/buildkit/cache/estargz.go rename to vendor/github.com/moby/buildkit/util/compression/estargz.go index f67d14925d..9d44d94048 100644 --- a/vendor/github.com/moby/buildkit/cache/estargz.go +++ b/vendor/github.com/moby/buildkit/util/compression/estargz.go @@ -1,4 +1,4 @@ -package cache +package compression import ( "archive/tar" @@ -11,24 +11,30 @@ import ( cdcompression "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" "github.com/containerd/stargz-snapshotter/estargz" - "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/iohelper" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -var eStargzAnnotations = []string{estargz.TOCJSONDigestAnnotation, estargz.StoreUncompressedSizeAnnotation} +var EStargzAnnotations = []string{estargz.TOCJSONDigestAnnotation, estargz.StoreUncompressedSizeAnnotation} -// compressEStargz writes the passed blobs stream as an eStargz-compressed blob. -// finalize function finalizes the written blob metadata and returns all eStargz annotations. 
-func compressEStargz(comp compression.Config) (compressorFunc compressor, finalize func(context.Context, content.Store) (map[string]string, error)) { +const containerdUncompressed = "containerd.io/uncompressed" +const estargzLabel = "buildkit.io/compression/estargz" + +func (c estargzType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { var cInfo *compressionInfo var writeErr error var mu sync.Mutex return func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) { - if compression.FromMediaType(requiredMediaType) != compression.Gzip { - return nil, fmt.Errorf("unsupported media type for estargz compressor %q", requiredMediaType) + ct, err := FromMediaType(requiredMediaType) + if err != nil { + return nil, err + } + if ct != Gzip { + return nil, errors.Errorf("unsupported media type for estargz compressor %q", requiredMediaType) } done := make(chan struct{}) pr, pw := io.Pipe() @@ -76,7 +82,7 @@ func compressEStargz(comp compression.Config) (compressorFunc compressor, finali pr.Close() return nil }() - return &writeCloser{pw, func() error { + return &iohelper.WriteCloser{WriteCloser: pw, CloseFunc: func() error { <-done // wait until the write completes return nil }}, nil @@ -113,11 +119,44 @@ func compressEStargz(comp compression.Config) (compressorFunc compressor, finali } } -const estargzLabel = "buildkit.io/compression/estargz" +func (c estargzType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + return decompress(ctx, cs, desc) +} + +func (c estargzType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + esgz, err := c.Is(ctx, cs, desc.Digest) + if err != nil { + return false, err + } + if !images.IsLayerType(desc.MediaType) || esgz { + return false, nil + } + return true, nil +} + +func (c estargzType) NeedsComputeDiffBySelf() bool { + return true +} + +func (c estargzType) OnlySupportOCITypes() bool { + return true +} + +func (c estargzType) NeedsForceCompression() bool { + return false +} + +func (c estargzType) MediaType() string { + return ocispecs.MediaTypeImageLayerGzip +} + +func (c estargzType) String() string { + return "estargz" +} // isEStargz returns true when the specified digest of content exists in // the content store and it's eStargz. 
-func isEStargz(ctx context.Context, cs content.Store, dgst digest.Digest) (bool, error) { +func (c estargzType) Is(ctx context.Context, cs content.Store, dgst digest.Digest) (bool, error) { info, err := cs.Info(ctx, dgst) if err != nil { return false, nil @@ -178,39 +217,6 @@ func decompressEStargz(r *io.SectionReader) (io.ReadCloser, error) { return estargz.Unpack(r, new(estargz.GzipDecompressor)) } -type writeCloser struct { - io.WriteCloser - closeFunc func() error -} - -func (wc *writeCloser) Close() error { - err1 := wc.WriteCloser.Close() - err2 := wc.closeFunc() - if err1 != nil { - return errors.Wrapf(err1, "failed to close: %v", err2) - } - return err2 -} - -type counter struct { - n int64 - mu sync.Mutex -} - -func (c *counter) Write(p []byte) (n int, err error) { - c.mu.Lock() - c.n += int64(len(p)) - c.mu.Unlock() - return len(p), nil -} - -func (c *counter) size() (n int64) { - c.mu.Lock() - n = c.n - c.mu.Unlock() - return -} - type compressionInfo struct { blobInfo tocDigest digest.Digest @@ -227,7 +233,7 @@ func calculateBlobInfo() (io.WriteCloser, chan blobInfo) { pr, pw := io.Pipe() go func() { defer pr.Close() - c := new(counter) + c := new(iohelper.Counter) dgstr := digest.Canonical.Digester() diffID := digest.Canonical.Digester() decompressR, err := cdcompression.DecompressStream(io.TeeReader(pr, dgstr.Hash())) @@ -244,7 +250,7 @@ func calculateBlobInfo() (io.WriteCloser, chan blobInfo) { pr.CloseWithError(err) return } - res <- blobInfo{dgstr.Digest(), diffID.Digest(), c.size()} + res <- blobInfo{dgstr.Digest(), diffID.Digest(), c.Size()} }() return pw, res } diff --git a/vendor/github.com/moby/buildkit/util/compression/gzip.go b/vendor/github.com/moby/buildkit/util/compression/gzip.go new file mode 100644 index 0000000000..7120ba35e3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/compression/gzip.go @@ -0,0 +1,69 @@ +package compression + +import ( + "compress/gzip" + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func (c gzipType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { + return func(dest io.Writer, _ string) (io.WriteCloser, error) { + return gzipWriter(comp)(dest) + }, nil +} + +func (c gzipType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + return decompress(ctx, cs, desc) +} + +func (c gzipType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + esgz, err := EStargz.Is(ctx, cs, desc.Digest) + if err != nil { + return false, err + } + if !images.IsLayerType(desc.MediaType) { + return false, nil + } + ct, err := FromMediaType(desc.MediaType) + if err != nil { + return false, err + } + if ct == Gzip && !esgz { + return false, nil + } + return true, nil +} + +func (c gzipType) NeedsComputeDiffBySelf() bool { + return false +} + +func (c gzipType) OnlySupportOCITypes() bool { + return false +} + +func (c gzipType) NeedsForceCompression() bool { + return false +} + +func (c gzipType) MediaType() string { + return ocispecs.MediaTypeImageLayerGzip +} + +func (c gzipType) String() string { + return "gzip" +} + +func gzipWriter(comp Config) func(io.Writer) (io.WriteCloser, error) { + return func(dest io.Writer) (io.WriteCloser, error) { + level := gzip.DefaultCompression + if comp.Level != nil { + level = *comp.Level + } + return gzip.NewWriterLevel(dest, level) + } +} diff --git 
a/vendor/github.com/moby/buildkit/util/compression/nydus.go b/vendor/github.com/moby/buildkit/util/compression/nydus.go new file mode 100644 index 0000000000..4e04be70b7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/compression/nydus.go @@ -0,0 +1,141 @@ +//go:build nydus +// +build nydus + +package compression + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" +) + +type nydusType struct{} + +var Nydus = nydusType{} + +func init() { + toDockerLayerType[nydusify.MediaTypeNydusBlob] = nydusify.MediaTypeNydusBlob + toOCILayerType[nydusify.MediaTypeNydusBlob] = nydusify.MediaTypeNydusBlob +} + +func Parse(t string) (Type, error) { + ct, err := parse(t) + if err != nil && t == Nydus.String() { + return Nydus, nil + } + return ct, err +} + +func FromMediaType(mediaType string) (Type, error) { + ct, err := fromMediaType(mediaType) + if err != nil && mediaType == nydusify.MediaTypeNydusBlob { + return Nydus, nil + } + return ct, err +} + +func (c nydusType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { + digester := digest.Canonical.Digester() + return func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) { + writer := io.MultiWriter(dest, digester.Hash()) + return nydusify.Pack(ctx, writer, nydusify.PackOption{}) + }, func(ctx context.Context, cs content.Store) (map[string]string, error) { + // Fill necessary labels + uncompressedDgst := digester.Digest().String() + info, err := cs.Info(ctx, digester.Digest()) + if err != nil { + return nil, errors.Wrap(err, "get info from content store") + } + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[containerdUncompressed] = uncompressedDgst + if _, err := cs.Update(ctx, info, "labels."+containerdUncompressed); err != nil { + return nil, errors.Wrap(err, "update info to content store") + } + + // Fill annotations + annotations := map[string]string{ + containerdUncompressed: uncompressedDgst, + // Use this annotation to identify nydus blob layer. 
+ nydusify.LayerAnnotationNydusBlob: "true", + } + return annotations, nil + } +} + +func (c nydusType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + + go func() { + defer pw.Close() + if err := nydusify.Unpack(ctx, ra, pw, nydusify.UnpackOption{}); err != nil { + pw.CloseWithError(errors.Wrap(err, "unpack nydus blob")) + } + }() + + return pr, nil +} + +func (c nydusType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + if !images.IsLayerType(desc.MediaType) { + return false, nil + } + + if isNydusBlob, err := c.Is(ctx, cs, desc); err != nil { + return true, nil + } else if isNydusBlob { + return false, nil + } + + return true, nil +} + +func (c nydusType) NeedsComputeDiffBySelf() bool { + return true +} + +func (c nydusType) OnlySupportOCITypes() bool { + return true +} + +func (c nydusType) NeedsForceCompression() bool { + return true +} + +func (c nydusType) MediaType() string { + return nydusify.MediaTypeNydusBlob +} + +func (c nydusType) String() string { + return "nydus" +} + +// Is returns true when the specified digest of content exists in +// the content store and it's nydus format. +func (c nydusType) Is(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + if desc.Annotations == nil { + return false, nil + } + hasMediaType := desc.MediaType == nydusify.MediaTypeNydusBlob + _, hasAnno := desc.Annotations[nydusify.LayerAnnotationNydusBlob] + + _, err := cs.Info(ctx, desc.Digest) + if err != nil { + return false, err + } + + return hasMediaType && hasAnno, nil +} diff --git a/vendor/github.com/moby/buildkit/util/compression/parse.go b/vendor/github.com/moby/buildkit/util/compression/parse.go new file mode 100644 index 0000000000..6567da4e87 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/compression/parse.go @@ -0,0 +1,12 @@ +//go:build !nydus +// +build !nydus + +package compression + +func Parse(t string) (Type, error) { + return parse(t) +} + +func FromMediaType(mediaType string) (Type, error) { + return fromMediaType(mediaType) +} diff --git a/vendor/github.com/moby/buildkit/util/compression/uncompressed.go b/vendor/github.com/moby/buildkit/util/compression/uncompressed.go new file mode 100644 index 0000000000..5fc5b8e92a --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/compression/uncompressed.go @@ -0,0 +1,61 @@ +package compression + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/docker/docker/pkg/ioutils" + "github.com/moby/buildkit/util/iohelper" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func (c uncompressedType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { + return func(dest io.Writer, mediaType string) (io.WriteCloser, error) { + return &iohelper.NopWriteCloser{Writer: dest}, nil + }, nil +} + +func (c uncompressedType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + rdr := io.NewSectionReader(ra, 0, ra.Size()) + return ioutils.NewReadCloserWrapper(rdr, ra.Close), nil +} + +func (c uncompressedType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + if !images.IsLayerType(desc.MediaType) 
{ + return false, nil + } + ct, err := FromMediaType(desc.MediaType) + if err != nil { + return false, err + } + if ct == Uncompressed { + return false, nil + } + return true, nil +} + +func (c uncompressedType) NeedsComputeDiffBySelf() bool { + return false +} + +func (c uncompressedType) OnlySupportOCITypes() bool { + return false +} + +func (c uncompressedType) NeedsForceCompression() bool { + return false +} + +func (c uncompressedType) MediaType() string { + return ocispecs.MediaTypeImageLayer +} + +func (c uncompressedType) String() string { + return "uncompressed" +} diff --git a/vendor/github.com/moby/buildkit/util/compression/zstd.go b/vendor/github.com/moby/buildkit/util/compression/zstd.go new file mode 100644 index 0000000000..f18872199f --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/compression/zstd.go @@ -0,0 +1,80 @@ +package compression + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/klauspost/compress/zstd" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func (c zstdType) Compress(ctx context.Context, comp Config) (compressorFunc Compressor, finalize Finalizer) { + return func(dest io.Writer, _ string) (io.WriteCloser, error) { + return zstdWriter(comp)(dest) + }, nil +} + +func (c zstdType) Decompress(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (io.ReadCloser, error) { + return decompress(ctx, cs, desc) +} + +func (c zstdType) NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error) { + if !images.IsLayerType(desc.MediaType) { + return false, nil + } + ct, err := FromMediaType(desc.MediaType) + if err != nil { + return false, err + } + if ct == Zstd { + return false, nil + } + return true, nil +} + +func (c zstdType) NeedsComputeDiffBySelf() bool { + return true +} + +func (c zstdType) OnlySupportOCITypes() bool { + return false +} + +func (c zstdType) NeedsForceCompression() bool { + return false +} + +func (c zstdType) MediaType() string { + return mediaTypeImageLayerZstd +} + +func (c zstdType) String() string { + return "zstd" +} + +func zstdWriter(comp Config) func(io.Writer) (io.WriteCloser, error) { + return func(dest io.Writer) (io.WriteCloser, error) { + level := zstd.SpeedDefault + if comp.Level != nil { + level = toZstdEncoderLevel(*comp.Level) + } + return zstd.NewWriter(dest, zstd.WithEncoderLevel(level)) + } +} + +func toZstdEncoderLevel(level int) zstd.EncoderLevel { + // map zstd compression levels to go-zstd levels + // once we also have c based implementation move this to helper pkg + if level < 0 { + return zstd.SpeedDefault + } else if level < 3 { + return zstd.SpeedFastest + } else if level < 7 { + return zstd.SpeedDefault + } else if level < 9 { + return zstd.SpeedBetterCompression + } + return zstd.SpeedBestCompression +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/buffer.go b/vendor/github.com/moby/buildkit/util/contentutil/buffer.go index 8babab9177..9230b20731 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/buffer.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/buffer.go @@ -3,7 +3,7 @@ package contentutil import ( "bytes" "context" - "io/ioutil" + "io" "strings" "sync" "time" @@ -117,7 +117,7 @@ func (b *buffer) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (conten if err != nil { return nil, err } - return &readerAt{Reader: r, Closer: ioutil.NopCloser(r), size: int64(r.Len())}, nil + return &readerAt{Reader: r, Closer: 
io.NopCloser(r), size: int64(r.Len())}, nil } func (b *buffer) getBytesReader(ctx context.Context, dgst digest.Digest) (*bytes.Reader, error) { diff --git a/vendor/github.com/moby/buildkit/util/contentutil/copy.go b/vendor/github.com/moby/buildkit/util/contentutil/copy.go index 2509ce1a3b..5039bd0c20 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/copy.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/copy.go @@ -3,6 +3,7 @@ package contentutil import ( "context" "io" + "strings" "sync" "github.com/containerd/containerd/content" @@ -75,7 +76,7 @@ func CopyChain(ctx context.Context, ingester content.Ingester, provider content. } }) handlers := []images.Handler{ - images.ChildrenHandler(provider), + annotateDistributionSourceHandler(images.ChildrenHandler(provider), desc.Annotations), filterHandler, retryhandler.New(limited.FetchHandler(ingester, &localFetcher{provider}, ""), func(_ []byte) {}), } @@ -92,3 +93,45 @@ func CopyChain(ctx context.Context, ingester content.Ingester, provider content. return nil } + +func annotateDistributionSourceHandler(f images.HandlerFunc, basis map[string]string) images.HandlerFunc { + return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // only add distribution source for the config or blob data descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: + default: + return children, nil + } + + for i := range children { + child := children[i] + + for k, v := range basis { + if !strings.HasPrefix(k, "containerd.io/distribution.source.") { + continue + } + if child.Annotations != nil { + if _, ok := child.Annotations[k]; ok { + // don't override if already present + continue + } + } + + if child.Annotations == nil { + child.Annotations = map[string]string{} + } + child.Annotations[k] = v + } + + children[i] = child + } + + return children, nil + } +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go b/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go index 469096d340..aba096d7c3 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go @@ -6,6 +6,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" + "github.com/moby/buildkit/session" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -90,3 +91,23 @@ func (mp *MultiProvider) Add(dgst digest.Digest, p content.Provider) { defer mp.mu.Unlock() mp.sub[dgst] = p } + +func (mp *MultiProvider) UnlazySession(desc ocispecs.Descriptor) session.Group { + type unlazySession interface { + UnlazySession(ocispecs.Descriptor) session.Group + } + + mp.mu.RLock() + if p, ok := mp.sub[desc.Digest]; ok { + mp.mu.RUnlock() + if cd, ok := p.(unlazySession); ok { + return cd.UnlazySession(desc) + } + } else { + mp.mu.RUnlock() + } + if cd, ok := mp.base.(unlazySession); ok { + return cd.UnlazySession(desc) + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go index 6e0557961c..c53a24b865 100644 --- a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go 
+++ b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go @@ -145,7 +145,7 @@ func getCurrentCaps() ([]string, error) { func getAllCaps() ([]string, error) { availableCaps, err := getCurrentCaps() if err != nil { - return nil, fmt.Errorf("error getting current capabilities: %s", err) + return nil, errors.Errorf("error getting current capabilities: %s", err) } // see if any of the base linux35Caps are not available to be granted diff --git a/vendor/github.com/moby/buildkit/util/gitutil/git_ref.go b/vendor/github.com/moby/buildkit/util/gitutil/git_ref.go new file mode 100644 index 0000000000..05ae9a02ec --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/gitutil/git_ref.go @@ -0,0 +1,95 @@ +package gitutil + +import ( + "regexp" + "strings" + + "github.com/containerd/containerd/errdefs" +) + +// GitRef represents a git ref. +// +// Examples: +// - "https://github.com/foo/bar.git#baz/qux:quux/quuz" is parsed into: +// {Remote: "https://github.com/foo/bar.git", ShortName: "bar", Commit:"baz/qux", SubDir: "quux/quuz"}. +type GitRef struct { + // Remote is the remote repository path. + Remote string + + // ShortName is the directory name of the repo. + // e.g., "bar" for "https://github.com/foo/bar.git" + ShortName string + + // Commit is a commit hash, a tag, or branch name. + // Commit is optional. + Commit string + + // SubDir is a directory path inside the repo. + // SubDir is optional. + SubDir string + + // IndistinguishableFromLocal is true for a ref that is indistinguishable from a local file path, + // e.g., "github.com/foo/bar". + // + // Deprecated. + // Instead, use a distinguishable form such as "https://github.com/foo/bar.git". + // + // The dockerfile frontend still accepts this form only for build contexts. + IndistinguishableFromLocal bool + + // UnencryptedTCP is true for a ref that needs an unencrypted TCP connection, + // e.g., "git://..." and "http://..." . + // + // Discouraged, although not deprecated. + // Instead, consider using an encrypted TCP connection such as "git@github.com/foo/bar.git" or "https://github.com/foo/bar.git". + UnencryptedTCP bool +} + +// var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) + +// ParseGitRef parses a git ref. +func ParseGitRef(ref string) (*GitRef, error) { + res := &GitRef{} + + if strings.HasPrefix(ref, "github.com/") { + res.IndistinguishableFromLocal = true // Deprecated + } else { + _, proto := ParseProtocol(ref) + switch proto { + case UnknownProtocol: + return nil, errdefs.ErrInvalidArgument + } + switch proto { + case HTTPProtocol, GitProtocol: + res.UnencryptedTCP = true // Discouraged, but not deprecated + } + switch proto { + // An HTTP(S) URL is considered to be a valid git ref only when it has the ".git[...]" suffix. 
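+	// e.g., "https://github.com/foo/bar.git" and "https://github.com/foo/bar.git#v1.0.0" match,
+	// while "https://github.com/foo/bar" (no ".git" suffix) is rejected as a git ref.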
+ case HTTPProtocol, HTTPSProtocol: + var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) + if !gitURLPathWithFragmentSuffix.MatchString(ref) { + return nil, errdefs.ErrInvalidArgument + } + } + } + + refSplitBySharp := strings.SplitN(ref, "#", 2) + res.Remote = refSplitBySharp[0] + if len(res.Remote) == 0 { + return res, errdefs.ErrInvalidArgument + } + + if len(refSplitBySharp) > 1 { + refSplitBySharpSplitByColon := strings.SplitN(refSplitBySharp[1], ":", 2) + res.Commit = refSplitBySharpSplitByColon[0] + if len(res.Commit) == 0 { + return res, errdefs.ErrInvalidArgument + } + if len(refSplitBySharpSplitByColon) > 1 { + res.SubDir = refSplitBySharpSplitByColon[1] + } + } + repoSplitBySlash := strings.Split(res.Remote, "/") + res.ShortName = strings.TrimSuffix(repoSplitBySlash[len(repoSplitBySlash)-1], ".git") + return res, nil +} diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go index f52f18673e..6cd9fae98e 100644 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go +++ b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go @@ -5,8 +5,9 @@ import ( "errors" "github.com/containerd/typeurl" + rpc "github.com/gogo/googleapis/google/rpc" gogotypes "github.com/gogo/protobuf/types" - "github.com/golang/protobuf/proto" // nolint:staticcheck + "github.com/golang/protobuf/proto" //nolint:staticcheck "github.com/golang/protobuf/ptypes/any" "github.com/moby/buildkit/util/stack" "github.com/sirupsen/logrus" @@ -42,6 +43,14 @@ func ToGRPC(err error) error { st = status.FromProto(pb) } + // If the original error was wrapped with more context than the GRPCStatus error, + // copy the original message to the GRPCStatus error + if err.Error() != st.Message() { + pb := st.Proto() + pb.Message = err.Error() + st = status.FromProto(pb) + } + var details []proto.Message for _, st := range stack.Traces(err) { @@ -173,7 +182,7 @@ func FromGRPC(err error) error { for _, s := range stacks { if s != nil { - err = stack.Wrap(err, *s) + err = stack.Wrap(err, s) } } @@ -188,6 +197,20 @@ func FromGRPC(err error) error { return stack.Enable(err) } +func ToRPCStatus(st *spb.Status) *rpc.Status { + details := make([]*gogotypes.Any, len(st.Details)) + + for i, d := range st.Details { + details[i] = gogoAny(d) + } + + return &rpc.Status{ + Code: int32(st.Code), + Message: st.Message, + Details: details, + } +} + type grpcStatusError struct { st *status.Status } diff --git a/vendor/github.com/moby/buildkit/util/imageutil/config.go b/vendor/github.com/moby/buildkit/util/imageutil/config.go index 54c46e5f60..76e0a5da35 100644 --- a/vendor/github.com/moby/buildkit/util/imageutil/config.go +++ b/vendor/github.com/moby/buildkit/util/imageutil/config.go @@ -13,6 +13,7 @@ import ( "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" + "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/util/resolver/limited" @@ -172,7 +173,8 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo } else { descs = append(descs, index.Manifests...) 
} - case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType: + case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType, + attestation.MediaTypeDockerSchema2AttestationType: // childless data types. return nil, nil default: diff --git a/vendor/github.com/moby/buildkit/util/imageutil/schema1.go b/vendor/github.com/moby/buildkit/util/imageutil/schema1.go index 10838bf50d..cd66d9123e 100644 --- a/vendor/github.com/moby/buildkit/util/imageutil/schema1.go +++ b/vendor/github.com/moby/buildkit/util/imageutil/schema1.go @@ -3,11 +3,12 @@ package imageutil import ( "context" "encoding/json" - "io/ioutil" + "io" "strings" "time" "github.com/containerd/containerd/remotes" + "github.com/moby/buildkit/exporter/containerimage/image" digest "github.com/opencontainers/go-digest" ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -19,7 +20,7 @@ func readSchema1Config(ctx context.Context, ref string, desc ocispecs.Descriptor return "", nil, err } defer rc.Close() - dt, err := ioutil.ReadAll(rc) + dt, err := io.ReadAll(rc) if err != nil { return "", nil, errors.Wrap(err, "failed to fetch schema1 manifest") } @@ -44,7 +45,7 @@ func convertSchema1ConfigMeta(in []byte) ([]byte, error) { return nil, errors.Errorf("invalid schema1 manifest") } - var img ocispecs.Image + var img image.Image if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil { return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history") } @@ -68,7 +69,7 @@ func convertSchema1ConfigMeta(in []byte) ([]byte, error) { } } - dt, err := json.MarshalIndent(img, "", " ") + dt, err := json.MarshalIndent(img, "", " ") if err != nil { return nil, errors.Wrap(err, "failed to marshal schema1 config") } diff --git a/vendor/github.com/moby/buildkit/util/iohelper/helper.go b/vendor/github.com/moby/buildkit/util/iohelper/helper.go new file mode 100644 index 0000000000..e0ebaf9bb5 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/iohelper/helper.go @@ -0,0 +1,63 @@ +package iohelper + +import ( + "io" + "sync" + + "github.com/pkg/errors" +) + +type NopWriteCloser struct { + io.Writer +} + +func (w *NopWriteCloser) Close() error { + return nil +} + +type ReadCloser struct { + io.ReadCloser + CloseFunc func() error +} + +func (rc *ReadCloser) Close() error { + err1 := rc.ReadCloser.Close() + err2 := rc.CloseFunc() + if err1 != nil { + return errors.Wrapf(err1, "failed to close: %v", err2) + } + return err2 +} + +type WriteCloser struct { + io.WriteCloser + CloseFunc func() error +} + +func (wc *WriteCloser) Close() error { + err1 := wc.WriteCloser.Close() + err2 := wc.CloseFunc() + if err1 != nil { + return errors.Wrapf(err1, "failed to close: %v", err2) + } + return err2 +} + +type Counter struct { + n int64 + mu sync.Mutex +} + +func (c *Counter) Write(p []byte) (n int, err error) { + c.mu.Lock() + c.n += int64(len(p)) + c.mu.Unlock() + return len(p), nil +} + +func (c *Counter) Size() (n int64) { + c.mu.Lock() + n = c.n + c.mu.Unlock() + return +} diff --git a/vendor/github.com/moby/buildkit/util/network/host.go b/vendor/github.com/moby/buildkit/util/network/host.go index c50268d45f..fbd6747d00 100644 --- a/vendor/github.com/moby/buildkit/util/network/host.go +++ b/vendor/github.com/moby/buildkit/util/network/host.go @@ -4,6 +4,8 @@ package network import ( + "context" + "github.com/containerd/containerd/oci" specs "github.com/opencontainers/runtime-spec/specs-go" ) @@ -15,10 +17,14 
@@ func NewHostProvider() Provider { type host struct { } -func (h *host) New() (Namespace, error) { +func (h *host) New(_ context.Context, hostname string) (Namespace, error) { return &hostNS{}, nil } +func (h *host) Close() error { + return nil +} + type hostNS struct { } diff --git a/vendor/github.com/moby/buildkit/util/network/network.go b/vendor/github.com/moby/buildkit/util/network/network.go index befeef0c75..c48f1984f0 100644 --- a/vendor/github.com/moby/buildkit/util/network/network.go +++ b/vendor/github.com/moby/buildkit/util/network/network.go @@ -1,6 +1,7 @@ package network import ( + "context" "io" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -8,7 +9,8 @@ import ( // Provider interface for Network type Provider interface { - New() (Namespace, error) + io.Closer + New(ctx context.Context, hostname string) (Namespace, error) } // Namespace of network for workers diff --git a/vendor/github.com/moby/buildkit/util/network/none.go b/vendor/github.com/moby/buildkit/util/network/none.go index 336ff68b91..e2b9d122d6 100644 --- a/vendor/github.com/moby/buildkit/util/network/none.go +++ b/vendor/github.com/moby/buildkit/util/network/none.go @@ -1,6 +1,8 @@ package network import ( + "context" + specs "github.com/opencontainers/runtime-spec/specs-go" ) @@ -11,10 +13,14 @@ func NewNoneProvider() Provider { type none struct { } -func (h *none) New() (Namespace, error) { +func (h *none) New(_ context.Context, hostname string) (Namespace, error) { return &noneNS{}, nil } +func (h *none) Close() error { + return nil +} + type noneNS struct { } diff --git a/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go b/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go index 12f153f0b6..f2f69bba06 100644 --- a/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go +++ b/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go @@ -8,7 +8,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -114,7 +113,7 @@ func GetOverlayLayers(m mount.Mount) ([]string, error) { // WriteUpperdir writes a layer tar archive into the specified writer, based on // the diff information stored in the upperdir. func WriteUpperdir(ctx context.Context, w io.Writer, upperdir string, lower []mount.Mount) error { - emptyLower, err := ioutil.TempDir("", "buildkit") // empty directory used for the lower of diff view + emptyLower, err := os.MkdirTemp("", "buildkit") // empty directory used for the lower of diff view if err != nil { return errors.Wrapf(err, "failed to create temp dir") } @@ -183,7 +182,7 @@ func Changes(ctx context.Context, changeFn fs.ChangeFunc, upperdir, upperdirView } else if redirect { // Return error when redirect_dir is enabled which can result to a wrong diff. 
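			// (redirect_dir records a directory rename in a "trusted.overlay.redirect" xattr instead of
			// copying the tree, so a differ that only walks the upperdir cannot reconstruct the change.)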
// TODO: support redirect_dir - return fmt.Errorf("redirect_dir is used but it's not supported in overlayfs differ") + return errors.New("redirect_dir is used but it's not supported in overlayfs differ") } // Check if this is a deleted entry diff --git a/vendor/github.com/moby/buildkit/util/progress/multireader.go b/vendor/github.com/moby/buildkit/util/progress/multireader.go index 8d8bbf54c5..b0d92dde8f 100644 --- a/vendor/github.com/moby/buildkit/util/progress/multireader.go +++ b/vendor/github.com/moby/buildkit/util/progress/multireader.go @@ -12,6 +12,7 @@ type MultiReader struct { initialized bool done chan struct{} writers map[*progressWriter]func() + sent []*Progress } func NewMultiReader(pr Reader) *MultiReader { @@ -31,9 +32,61 @@ func (mr *MultiReader) Reader(ctx context.Context) Reader { pw, _, ctx := NewFromContext(ctx) w := pw.(*progressWriter) - mr.writers[w] = closeWriter + + isBehind := len(mr.sent) > 0 + + select { + case <-mr.done: + isBehind = true + default: + if !isBehind { + mr.writers[w] = closeWriter + } + } go func() { + if isBehind { + close := func() { + w.Close() + closeWriter() + } + i := 0 + for { + mr.mu.Lock() + sent := mr.sent + count := len(sent) - i + if count == 0 { + select { + case <-ctx.Done(): + close() + mr.mu.Unlock() + return + case <-mr.done: + close() + mr.mu.Unlock() + return + default: + } + mr.writers[w] = closeWriter + mr.mu.Unlock() + break + } + mr.mu.Unlock() + for i, p := range sent[i:] { + w.writeRawProgress(p) + if i%100 == 0 { + select { + case <-ctx.Done(): + close() + return + default: + } + } + } + i += count + } + } + select { case <-ctx.Done(): case <-mr.done: @@ -61,6 +114,7 @@ func (mr *MultiReader) handle() error { w.Close() c() } + close(mr.done) mr.mu.Unlock() return nil } @@ -72,6 +126,7 @@ func (mr *MultiReader) handle() error { w.writeRawProgress(p) } } + mr.sent = append(mr.sent, p...) mr.mu.Unlock() } } diff --git a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go index 1ce37ea210..a856db8caa 100644 --- a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go +++ b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go @@ -36,9 +36,7 @@ func (ps *MultiWriter) Add(pw Writer) { } ps.mu.Lock() plist := make([]*Progress, 0, len(ps.items)) - for _, p := range ps.items { - plist = append(plist, p) - } + plist = append(plist, ps.items...) 
sort.Slice(plist, func(i, j int) bool { return plist[i].Timestamp.Before(plist[j].Timestamp) }) diff --git a/vendor/github.com/moby/buildkit/util/progress/progress.go b/vendor/github.com/moby/buildkit/util/progress/progress.go index 83ca6672a8..fbbb22de07 100644 --- a/vendor/github.com/moby/buildkit/util/progress/progress.go +++ b/vendor/github.com/moby/buildkit/util/progress/progress.go @@ -118,12 +118,22 @@ func (pr *progressReader) Read(ctx context.Context) ([]*Progress, error) { done := make(chan struct{}) defer close(done) go func() { - select { - case <-done: - case <-ctx.Done(): - pr.mu.Lock() - pr.cond.Broadcast() - pr.mu.Unlock() + prdone := pr.ctx.Done() + for { + select { + case <-done: + return + case <-ctx.Done(): + pr.mu.Lock() + pr.cond.Broadcast() + pr.mu.Unlock() + return + case <-prdone: + pr.mu.Lock() + pr.cond.Broadcast() + pr.mu.Unlock() + prdone = nil + } } }() pr.mu.Lock() @@ -274,3 +284,20 @@ func (pw *noOpWriter) Write(_ string, _ interface{}) error { func (pw *noOpWriter) Close() error { return nil } + +func OneOff(ctx context.Context, id string) func(err error) error { + pw, _, _ := NewFromContext(ctx) + now := time.Now() + st := Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go b/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go index b743706ed9..93c50106f7 100644 --- a/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go +++ b/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go @@ -106,6 +106,8 @@ func trackProgress(ctx context.Context, desc ocispecs.Descriptor, manager PullMa select { case <-ctx.Done(): onFinalStatus = true + // we need a context for the manager.Status() calls to pass once. after that this function will exit + ctx = context.TODO() case <-ticker.C: } diff --git a/vendor/github.com/moby/buildkit/util/purl/image.go b/vendor/github.com/moby/buildkit/util/purl/image.go new file mode 100644 index 0000000000..b3364ba4ce --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/purl/image.go @@ -0,0 +1,117 @@ +package purl + +import ( + "strings" + + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution/reference" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + packageurl "github.com/package-url/packageurl-go" + "github.com/pkg/errors" +) + +// RefToPURL converts an image reference with optional platform constraint to a package URL. 
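+// For example (illustrative), "alpine:3.17" normalizes to "docker.io/library/alpine:3.17" and is
+// converted to "pkg:docker/alpine@3.17"; a digest, when present, is carried as a "digest" qualifier.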
+// Image references are defined in https://github.com/distribution/distribution/blob/v2.8.1/reference/reference.go#L1 +// Package URLs are defined in https://github.com/package-url/purl-spec +func RefToPURL(ref string, platform *ocispecs.Platform) (string, error) { + named, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return "", errors.Wrapf(err, "failed to parse ref %q", ref) + } + var qualifiers []packageurl.Qualifier + + if canonical, ok := named.(reference.Canonical); ok { + qualifiers = append(qualifiers, packageurl.Qualifier{ + Key: "digest", + Value: canonical.Digest().String(), + }) + } else { + named = reference.TagNameOnly(named) + } + + version := "" + if tagged, ok := named.(reference.Tagged); ok { + version = tagged.Tag() + } + + name := reference.FamiliarName(named) + + ns := "" + parts := strings.Split(name, "/") + if len(parts) > 1 { + ns = strings.Join(parts[:len(parts)-1], "/") + } + name = parts[len(parts)-1] + + if platform != nil { + p := platforms.Normalize(*platform) + qualifiers = append(qualifiers, packageurl.Qualifier{ + Key: "platform", + Value: platforms.Format(p), + }) + } + + p := packageurl.NewPackageURL("docker", ns, name, version, qualifiers, "") + return p.ToString(), nil +} + +// PURLToRef converts a package URL to an image reference and platform. +func PURLToRef(purl string) (string, *ocispecs.Platform, error) { + p, err := packageurl.FromString(purl) + if err != nil { + return "", nil, err + } + if p.Type != "docker" { + return "", nil, errors.Errorf("invalid package type %q, expecting docker", p.Type) + } + ref := p.Name + if p.Namespace != "" { + ref = p.Namespace + "/" + ref + } + dgstVersion := "" + if p.Version != "" { + dgst, err := digest.Parse(p.Version) + if err == nil { + ref = ref + "@" + dgst.String() + dgstVersion = dgst.String() + } else { + ref += ":" + p.Version + } + } + var platform *ocispecs.Platform + for _, q := range p.Qualifiers { + if q.Key == "platform" { + p, err := platforms.Parse(q.Value) + if err != nil { + return "", nil, err + } + platform = &p + } + if q.Key == "digest" { + if dgstVersion != "" { + if dgstVersion != q.Value { + return "", nil, errors.Errorf("digest %q does not match version %q", q.Value, dgstVersion) + } + continue + } + dgst, err := digest.Parse(q.Value) + if err != nil { + return "", nil, err + } + ref = ref + "@" + dgst.String() + dgstVersion = dgst.String() + } + } + + if dgstVersion == "" && p.Version == "" { + ref += ":latest" + } + + named, err := reference.ParseNormalizedNamed(ref) + if err != nil { + return "", nil, errors.Wrapf(err, "invalid image url %q", purl) + } + + return named.String(), platform, nil +} diff --git a/vendor/github.com/moby/buildkit/util/push/push.go b/vendor/github.com/moby/buildkit/util/push/push.go index ffa3d35f32..881b2fd86f 100644 --- a/vendor/github.com/moby/buildkit/util/push/push.go +++ b/vendor/github.com/moby/buildkit/util/push/push.go @@ -6,7 +6,6 @@ import ( "fmt" "strings" "sync" - "time" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" @@ -16,6 +15,7 @@ import ( "github.com/containerd/containerd/remotes/docker" "github.com/docker/distribution/reference" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/attestation" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/util/progress" @@ -126,7 +126,7 @@ func Push(ctx context.Context, sm *session.Manager, sid string, provider content return err } - layersDone := 
oneOffProgress(ctx, "pushing layers") + layersDone := progress.OneOff(ctx, "pushing layers") err = images.Dispatch(ctx, skipNonDistributableBlobs(images.Handlers(handlers...)), nil, ocispecs.Descriptor{ Digest: dgst, Size: ra.Size(), @@ -136,7 +136,7 @@ func Push(ctx context.Context, sm *session.Manager, sid string, provider content return err } - mfstDone := oneOffProgress(ctx, fmt.Sprintf("pushing manifest for %s", ref)) + mfstDone := progress.OneOff(ctx, fmt.Sprintf("pushing manifest for %s", ref)) for i := len(manifestStack) - 1; i >= 0; i-- { if _, err := pushHandler(ctx, manifestStack[i]); err != nil { return mfstDone(err) @@ -212,23 +212,6 @@ func annotateDistributionSourceHandler(manager content.Manager, annotations map[ } } -func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.NewFromContext(ctx) - now := time.Now() - st := progress.Status{ - Started: &now, - } - pw.Write(id, st) - return func(err error) error { - // TODO: set error on status - now := time.Now() - st.Completed = &now - pw.Write(id, st) - pw.Close() - return err - } -} - func childrenHandler(provider content.Provider) images.HandlerFunc { return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { var descs []ocispecs.Descriptor @@ -266,7 +249,8 @@ func childrenHandler(provider content.Provider) images.HandlerFunc { } case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, - ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip: + ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip, + attestation.MediaTypeDockerSchema2AttestationType: // childless data types. return nil, nil default: diff --git a/vendor/github.com/moby/buildkit/util/resolver/authorizer.go b/vendor/github.com/moby/buildkit/util/resolver/authorizer.go index ed8034ccbc..6a4140d68b 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/authorizer.go +++ b/vendor/github.com/moby/buildkit/util/resolver/authorizer.go @@ -279,7 +279,7 @@ func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { username, secret := ah.common.Username, ah.common.Secret if username == "" || secret == "" { - return "", fmt.Errorf("failed to handle basic auth because missing username or secret") + return "", errors.New("failed to handle basic auth because missing username or secret") } auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) diff --git a/vendor/github.com/moby/buildkit/util/resolver/resolver.go b/vendor/github.com/moby/buildkit/util/resolver/resolver.go index a23f4b15cf..0639a1b623 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/resolver.go +++ b/vendor/github.com/moby/buildkit/util/resolver/resolver.go @@ -3,7 +3,6 @@ package resolver import ( "crypto/tls" "crypto/x509" - "io/ioutil" "net" "net/http" "os" @@ -67,7 +66,7 @@ func fillInsecureOpts(host string, c config.RegistryConfig, h docker.RegistryHos func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) { for _, d := range c.TLSConfigDir { - fs, err := ioutil.ReadDir(d) + fs, err := os.ReadDir(d) if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) { return nil, errors.WithStack(err) } @@ -98,7 +97,7 @@ func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) { } for _, p := range c.RootCAs { - dt, err := ioutil.ReadFile(p) + dt, err := os.ReadFile(p) if err != nil { return nil, errors.Wrapf(err, "failed to read %s", p) } 
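The resolver hunk above is one instance of a mechanical cleanup that recurs throughout this patch (see also imageutil/schema1.go, overlay/overlay_linux.go, and winlayers/applier.go): Go 1.16 deprecated io/ioutil, and each helper has a drop-in replacement in io or os. A minimal, self-contained sketch of the mapping, assuming nothing beyond the standard library (the file and directory paths are illustrative):

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	// ioutil.ReadAll(r)     -> io.ReadAll(r)
	dt, _ := io.ReadAll(strings.NewReader("hello"))
	fmt.Println(len(dt))

	// ioutil.Discard        -> io.Discard
	_, _ = io.Copy(io.Discard, strings.NewReader("drained"))

	// ioutil.TempDir("", p) -> os.MkdirTemp("", p)
	if dir, err := os.MkdirTemp("", "buildkit-example"); err == nil {
		defer os.RemoveAll(dir)
		fmt.Println(dir)
	}

	// ioutil.ReadFile(p)    -> os.ReadFile(p)
	if _, err := os.ReadFile("does-not-exist"); os.IsNotExist(err) {
		fmt.Println("missing, as expected")
	}

	// ioutil.ReadDir(d)     -> os.ReadDir(d); note the return type changes
	// from []fs.FileInfo to []fs.DirEntry (cheaper: no extra stat per entry).
	entries, _ := os.ReadDir(".")
	for _, e := range entries {
		fmt.Println(e.Name())
	}
}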
diff --git a/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go b/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go index 554076b07b..1a2f54ed76 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go +++ b/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go @@ -14,6 +14,10 @@ import ( "github.com/pkg/errors" ) +// MaxRetryBackoff is the maximum backoff time before giving up. This is a +// variable so that code which embeds BuildKit can override the default value. +var MaxRetryBackoff = 8 * time.Second + func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc { return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { backoff := time.Second @@ -35,7 +39,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc { return descs, nil } // backoff logic - if backoff >= 8*time.Second { + if backoff >= MaxRetryBackoff { return nil, err } if logger != nil { @@ -60,7 +64,7 @@ func retryError(err error) bool { return true } // catches TLS timeout or other network-related temporary errors - if ne, ok := errors.Cause(err).(net.Error); ok && ne.Temporary() { + if ne, ok := errors.Cause(err).(net.Error); ok && ne.Temporary() { //nolint:staticcheck // ignoring "SA1019: Temporary is deprecated", continue to propagate net.Error through the "temporary" status return true } // https://github.com/containerd/containerd/pull/4724 diff --git a/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go b/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go index 3c7583ffdd..163efee80e 100644 --- a/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go +++ b/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go @@ -1,6 +1,7 @@ package sshutil import ( + "errors" "fmt" "net" "strconv" @@ -11,7 +12,7 @@ import ( const defaultPort = 22 -var errCallbackDone = fmt.Errorf("callback failed on purpose") +var errCallbackDone = errors.New("callback failed on purpose") // addDefaultPort appends a default port if hostport doesn't contain one func addDefaultPort(hostport string, defaultPort int) string { diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.go b/vendor/github.com/moby/buildkit/util/stack/stack.go index 3409ac047a..18d03630b4 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.go @@ -79,7 +79,7 @@ func Enable(err error) error { return err } -func Wrap(err error, s Stack) error { +func Wrap(err error, s *Stack) error { return &withStack{stack: s, error: err} } @@ -151,7 +151,7 @@ func convertStack(s errors.StackTrace) *Stack { if idx == -1 { continue } - line, err := strconv.Atoi(p[1][idx+1:]) + line, err := strconv.ParseInt(p[1][idx+1:], 10, 32) if err != nil { continue } @@ -169,7 +169,7 @@ func convertStack(s errors.StackTrace) *Stack { } type withStack struct { - stack Stack + stack *Stack error } @@ -178,5 +178,5 @@ func (e *withStack) Unwrap() error { } func (e *withStack) StackTrace() *Stack { - return &e.stack + return e.stack } diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go index df55582db4..c4a73a68f4 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go @@ -1,172 +1,261 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.11.4 // source: stack.proto package stack import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Stack struct { - Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` - Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"` - Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` - Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` + Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"` + Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"` } -func (m *Stack) Reset() { *m = Stack{} } -func (m *Stack) String() string { return proto.CompactTextString(m) } -func (*Stack) ProtoMessage() {} +func (x *Stack) Reset() { + *x = Stack{} + if protoimpl.UnsafeEnabled { + mi := &file_stack_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Stack) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Stack) ProtoMessage() {} + +func (x *Stack) ProtoReflect() protoreflect.Message { + mi := &file_stack_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Stack.ProtoReflect.Descriptor instead. 
func (*Stack) Descriptor() ([]byte, []int) { - return fileDescriptor_b44c07feb2ca0a5a, []int{0} + return file_stack_proto_rawDescGZIP(), []int{0} } -func (m *Stack) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Stack.Unmarshal(m, b) -} -func (m *Stack) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Stack.Marshal(b, m, deterministic) -} -func (m *Stack) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stack.Merge(m, src) -} -func (m *Stack) XXX_Size() int { - return xxx_messageInfo_Stack.Size(m) -} -func (m *Stack) XXX_DiscardUnknown() { - xxx_messageInfo_Stack.DiscardUnknown(m) -} - -var xxx_messageInfo_Stack proto.InternalMessageInfo - -func (m *Stack) GetFrames() []*Frame { - if m != nil { - return m.Frames +func (x *Stack) GetFrames() []*Frame { + if x != nil { + return x.Frames } return nil } -func (m *Stack) GetCmdline() []string { - if m != nil { - return m.Cmdline +func (x *Stack) GetCmdline() []string { + if x != nil { + return x.Cmdline } return nil } -func (m *Stack) GetPid() int32 { - if m != nil { - return m.Pid +func (x *Stack) GetPid() int32 { + if x != nil { + return x.Pid } return 0 } -func (m *Stack) GetVersion() string { - if m != nil { - return m.Version +func (x *Stack) GetVersion() string { + if x != nil { + return x.Version } return "" } -func (m *Stack) GetRevision() string { - if m != nil { - return m.Revision +func (x *Stack) GetRevision() string { + if x != nil { + return x.Revision } return "" } type Frame struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` - Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` + Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"` } -func (m *Frame) Reset() { *m = Frame{} } -func (m *Frame) String() string { return proto.CompactTextString(m) } -func (*Frame) ProtoMessage() {} +func (x *Frame) Reset() { + *x = Frame{} + if protoimpl.UnsafeEnabled { + mi := &file_stack_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Frame) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Frame) ProtoMessage() {} + +func (x *Frame) ProtoReflect() protoreflect.Message { + mi := &file_stack_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Frame.ProtoReflect.Descriptor instead. 
func (*Frame) Descriptor() ([]byte, []int) { - return fileDescriptor_b44c07feb2ca0a5a, []int{1} + return file_stack_proto_rawDescGZIP(), []int{1} } -func (m *Frame) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Frame.Unmarshal(m, b) -} -func (m *Frame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Frame.Marshal(b, m, deterministic) -} -func (m *Frame) XXX_Merge(src proto.Message) { - xxx_messageInfo_Frame.Merge(m, src) -} -func (m *Frame) XXX_Size() int { - return xxx_messageInfo_Frame.Size(m) -} -func (m *Frame) XXX_DiscardUnknown() { - xxx_messageInfo_Frame.DiscardUnknown(m) -} - -var xxx_messageInfo_Frame proto.InternalMessageInfo - -func (m *Frame) GetName() string { - if m != nil { - return m.Name +func (x *Frame) GetName() string { + if x != nil { + return x.Name } return "" } -func (m *Frame) GetFile() string { - if m != nil { - return m.File +func (x *Frame) GetFile() string { + if x != nil { + return x.File } return "" } -func (m *Frame) GetLine() int32 { - if m != nil { - return m.Line +func (x *Frame) GetLine() int32 { + if x != nil { + return x.Line } return 0 } -func init() { - proto.RegisterType((*Stack)(nil), "stack.Stack") - proto.RegisterType((*Frame)(nil), "stack.Frame") +var File_stack_proto protoreflect.FileDescriptor + +var file_stack_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x73, + 0x74, 0x61, 0x63, 0x6b, 0x22, 0x8f, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x24, + 0x0a, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x52, 0x06, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x0a, 0x05, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterFile("stack.proto", fileDescriptor_b44c07feb2ca0a5a) +var ( + file_stack_proto_rawDescOnce sync.Once + file_stack_proto_rawDescData = file_stack_proto_rawDesc +) + +func file_stack_proto_rawDescGZIP() []byte { + file_stack_proto_rawDescOnce.Do(func() { + file_stack_proto_rawDescData = protoimpl.X.CompressGZIP(file_stack_proto_rawDescData) + }) + return file_stack_proto_rawDescData } -var fileDescriptor_b44c07feb2ca0a5a = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8f, 0x3d, 0xce, 0x82, 0x40, - 0x10, 0x86, 0xb3, 0xdf, 0xb2, 0x7c, 0x3a, 0x58, 0x98, 0xa9, 0x36, 0x56, 0x1b, 0x62, 0x41, 0x45, - 
0xa1, 0x47, 0x30, 0xa1, 0x32, 0x16, 0x78, 0x02, 0x84, 0x35, 0xd9, 0xc8, 0x5f, 0x76, 0x09, 0xd7, - 0xf0, 0xca, 0x66, 0x06, 0xb4, 0x7b, 0xde, 0x9f, 0xe4, 0x9d, 0x81, 0x24, 0x4c, 0x55, 0xfd, 0xca, - 0x47, 0x3f, 0x4c, 0x03, 0x2a, 0x16, 0xe9, 0x5b, 0x80, 0xba, 0x13, 0xe1, 0x11, 0xe2, 0xa7, 0xaf, - 0x3a, 0x1b, 0xb4, 0x30, 0x32, 0x4b, 0x4e, 0xbb, 0x7c, 0xa9, 0x17, 0x64, 0x96, 0x6b, 0x86, 0x1a, - 0xfe, 0xeb, 0xae, 0x69, 0x5d, 0x6f, 0xf5, 0x9f, 0x91, 0xd9, 0xb6, 0xfc, 0x4a, 0xdc, 0x83, 0x1c, - 0x5d, 0xa3, 0xa5, 0x11, 0x99, 0x2a, 0x09, 0xa9, 0x3b, 0x5b, 0x1f, 0xdc, 0xd0, 0xeb, 0xc8, 0x08, - 0xea, 0xae, 0x12, 0x0f, 0xb0, 0xf1, 0x76, 0x76, 0x1c, 0x29, 0x8e, 0x7e, 0x3a, 0xbd, 0x80, 0xe2, - 0x49, 0x44, 0x88, 0x6e, 0x55, 0x67, 0xb5, 0xe0, 0x02, 0x33, 0x79, 0x85, 0x6b, 0x69, 0x9b, 0x3d, - 0x62, 0xf2, 0xae, 0x74, 0xcf, 0xb2, 0xcc, 0xfc, 0x88, 0xf9, 0xc9, 0xf3, 0x27, 0x00, 0x00, 0xff, - 0xff, 0xfd, 0x2c, 0xbb, 0xfb, 0xf3, 0x00, 0x00, 0x00, +var file_stack_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_stack_proto_goTypes = []interface{}{ + (*Stack)(nil), // 0: stack.Stack + (*Frame)(nil), // 1: stack.Frame +} +var file_stack_proto_depIdxs = []int32{ + 1, // 0: stack.Stack.frames:type_name -> stack.Frame + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_stack_proto_init() } +func file_stack_proto_init() { + if File_stack_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_stack_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stack); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_stack_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Frame); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_stack_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_stack_proto_goTypes, + DependencyIndexes: file_stack_proto_depIdxs, + MessageInfos: file_stack_proto_msgTypes, + }.Build() + File_stack_proto = out.File + file_stack_proto_rawDesc = nil + file_stack_proto_goTypes = nil + file_stack_proto_depIdxs = nil } diff --git a/vendor/github.com/moby/buildkit/util/staticfs/merge.go b/vendor/github.com/moby/buildkit/util/staticfs/merge.go new file mode 100644 index 0000000000..d680b80cfc --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/staticfs/merge.go @@ -0,0 +1,110 @@ +package staticfs + +import ( + "context" + "io" + "io/fs" + "os" + "path/filepath" + + "github.com/tonistiigi/fsutil" + "golang.org/x/sync/errgroup" +) + +type MergeFS struct { + Lower fsutil.FS + Upper fsutil.FS +} + +var _ fsutil.FS = &MergeFS{} + +func NewMergeFS(lower, upper fsutil.FS) *MergeFS { + return &MergeFS{ + Lower: lower, + Upper: upper, + } +} + +type record struct { + path string + fi fs.FileInfo + err error +} + +func (r *record) key() string { + if r == nil { + return "" + } + return convertPathToKey(r.path) +} + +func (mfs *MergeFS) Walk(ctx context.Context, fn filepath.WalkFunc) error { + ch1 := make(chan *record, 10) + 
ch2 := make(chan *record, 10) + + eg, ctx := errgroup.WithContext(ctx) + eg.Go(func() error { + defer close(ch1) + return mfs.Lower.Walk(ctx, func(path string, info fs.FileInfo, err error) error { + select { + case ch1 <- &record{path: path, fi: info, err: err}: + case <-ctx.Done(): + } + return ctx.Err() + }) + }) + eg.Go(func() error { + defer close(ch2) + return mfs.Upper.Walk(ctx, func(path string, info fs.FileInfo, err error) error { + select { + case ch2 <- &record{path: path, fi: info, err: err}: + case <-ctx.Done(): + } + return ctx.Err() + }) + }) + + eg.Go(func() error { + next1, ok1 := <-ch1 + key1 := next1.key() + next2, ok2 := <-ch2 + key2 := next2.key() + + for { + if !ok1 && !ok2 { + break + } + if !ok2 || ok1 && key1 < key2 { + if err := fn(next1.path, next1.fi, next1.err); err != nil { + return err + } + next1, ok1 = <-ch1 + key1 = next1.key() + } else if !ok1 || ok2 && key1 >= key2 { + if err := fn(next2.path, next2.fi, next2.err); err != nil { + return err + } + if ok1 && key1 == key2 { + next1, ok1 = <-ch1 + key1 = next1.key() + } + next2, ok2 = <-ch2 + key2 = next2.key() + } + } + return nil + }) + + return eg.Wait() +} + +func (mfs *MergeFS) Open(p string) (io.ReadCloser, error) { + r, err := mfs.Upper.Open(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + return mfs.Lower.Open(p) + } + return r, nil +} diff --git a/vendor/github.com/moby/buildkit/util/staticfs/static.go b/vendor/github.com/moby/buildkit/util/staticfs/static.go new file mode 100644 index 0000000000..3b00060688 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/staticfs/static.go @@ -0,0 +1,74 @@ +package staticfs + +import ( + "bytes" + "context" + "io" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/tonistiigi/fsutil" + "github.com/tonistiigi/fsutil/types" +) + +type File struct { + Stat types.Stat + Data []byte +} + +type FS struct { + files map[string]File +} + +var _ fsutil.FS = &FS{} + +func NewFS() *FS { + return &FS{ + files: map[string]File{}, + } +} + +func (fs *FS) Add(p string, stat types.Stat, data []byte) { + stat.Size_ = int64(len(data)) + if stat.Mode == 0 { + stat.Mode = 0644 + } + stat.Path = p + fs.files[p] = File{ + Stat: stat, + Data: data, + } +} + +func (fs *FS) Walk(ctx context.Context, fn filepath.WalkFunc) error { + keys := make([]string, 0, len(fs.files)) + for k := range fs.files { + keys = append(keys, convertPathToKey(k)) + } + sort.Strings(keys) + for _, k := range keys { + p := convertKeyToPath(k) + st := fs.files[p].Stat + if err := fn(p, &fsutil.StatInfo{Stat: &st}, nil); err != nil { + return err + } + } + return nil +} + +func (fs *FS) Open(p string) (io.ReadCloser, error) { + if f, ok := fs.files[p]; ok { + return io.NopCloser(bytes.NewReader(f.Data)), nil + } + return nil, os.ErrNotExist +} + +func convertPathToKey(p string) string { + return strings.Replace(p, "/", "\x00", -1) +} + +func convertKeyToPath(p string) string { + return strings.Replace(p, "\x00", "/", -1) +} diff --git a/vendor/github.com/moby/buildkit/util/system/atime_unix.go b/vendor/github.com/moby/buildkit/util/system/atime_unix.go new file mode 100644 index 0000000000..9a7af36ffc --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/atime_unix.go @@ -0,0 +1,21 @@ +//go:build !windows +// +build !windows + +package system + +import ( + iofs "io/fs" + "syscall" + "time" + + "github.com/containerd/continuity/fs" + "github.com/pkg/errors" +) + +func Atime(st iofs.FileInfo) (time.Time, error) { + stSys, ok := st.Sys().(*syscall.Stat_t) + if !ok { + 
return time.Time{}, errors.Errorf("expected st.Sys() to be *syscall.Stat_t, got %T", st.Sys()) + } + return fs.StatATimeAsTime(stSys), nil +} diff --git a/vendor/github.com/moby/buildkit/util/system/atime_windows.go b/vendor/github.com/moby/buildkit/util/system/atime_windows.go new file mode 100644 index 0000000000..808408b613 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/atime_windows.go @@ -0,0 +1,17 @@ +package system + +import ( + "fmt" + iofs "io/fs" + "syscall" + "time" +) + +func Atime(st iofs.FileInfo) (time.Time, error) { + stSys, ok := st.Sys().(*syscall.Win32FileAttributeData) + if !ok { + return time.Time{}, fmt.Errorf("expected st.Sys() to be *syscall.Win32FileAttributeData, got %T", st.Sys()) + } + // ref: https://github.com/golang/go/blob/go1.19.2/src/os/types_windows.go#L230 + return time.Unix(0, stSys.LastAccessTime.Nanoseconds()), nil +} diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go index 8514166827..cc7b664d8b 100644 --- a/vendor/github.com/moby/buildkit/util/system/path_windows.go +++ b/vendor/github.com/moby/buildkit/util/system/path_windows.go @@ -4,9 +4,10 @@ package system import ( - "fmt" "path/filepath" "strings" + + "github.com/pkg/errors" ) // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. @@ -22,13 +23,13 @@ import ( // d:\ --> Fail func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) + return "", errors.Errorf("No relative path specified in %q", path) } if !filepath.IsAbs(path) || len(path) < 2 { return filepath.FromSlash(path), nil } if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") + return "", errors.New("The specified path is not on the system drive (C:)") } return filepath.FromSlash(path[2:]), nil } diff --git a/vendor/github.com/moby/buildkit/util/throttle/throttle.go b/vendor/github.com/moby/buildkit/util/throttle/throttle.go index dfc4aefa90..249b17dd49 100644 --- a/vendor/github.com/moby/buildkit/util/throttle/throttle.go +++ b/vendor/github.com/moby/buildkit/util/throttle/throttle.go @@ -31,7 +31,7 @@ func throttle(d time.Duration, f func(), wait bool) func() { go func() { for { mu.Lock() - if next == false { + if !next { running = false mu.Unlock() return diff --git a/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go b/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go new file mode 100644 index 0000000000..13e54bdefc --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/detect/detect.go @@ -0,0 +1,175 @@ +package detect + +import ( + "context" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + + "github.com/moby/buildkit/util/bklog" + "github.com/pkg/errors" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + "go.opentelemetry.io/otel/trace" +) + +type ExporterDetector func() (sdktrace.SpanExporter, error) + +type detector struct { + f ExporterDetector + priority int +} + +var ServiceName string +var Recorder *TraceRecorder + +var detectors map[string]detector +var once sync.Once +var tp trace.TracerProvider +var exporter sdktrace.SpanExporter +var closers []func(context.Context) error +var err error + +func Register(name string, exp ExporterDetector, 
priority int) { + if detectors == nil { + detectors = map[string]detector{} + } + detectors[name] = detector{ + f: exp, + priority: priority, + } +} + +func detectExporter() (sdktrace.SpanExporter, error) { + if n := os.Getenv("OTEL_TRACES_EXPORTER"); n != "" { + d, ok := detectors[n] + if !ok { + if n == "none" { + return nil, nil + } + return nil, errors.Errorf("unsupported opentelemetry tracer %v", n) + } + return d.f() + } + arr := make([]detector, 0, len(detectors)) + for _, d := range detectors { + arr = append(arr, d) + } + sort.Slice(arr, func(i, j int) bool { + return arr[i].priority < arr[j].priority + }) + for _, d := range arr { + exp, err := d.f() + if err != nil { + return nil, err + } + if exp != nil { + return exp, nil + } + } + return nil, nil +} + +func getExporter() (sdktrace.SpanExporter, error) { + exp, err := detectExporter() + if err != nil { + return nil, err + } + + if exp != nil { + exp = &threadSafeExporterWrapper{ + exporter: exp, + } + } + + if Recorder != nil { + Recorder.SpanExporter = exp + exp = Recorder + } + return exp, nil +} + +func detect() error { + tp = trace.NewNoopTracerProvider() + + exp, err := getExporter() + if err != nil || exp == nil { + return err + } + + // enable log with traceID when valid exporter + bklog.EnableLogWithTraceID(true) + + res, err := resource.Detect(context.Background(), serviceNameDetector{}) + if err != nil { + return err + } + res, err = resource.Merge(resource.Default(), res) + if err != nil { + return err + } + + sp := sdktrace.NewBatchSpanProcessor(exp) + + if Recorder != nil { + Recorder.flush = sp.ForceFlush + } + + sdktp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sp), sdktrace.WithResource(res)) + closers = append(closers, sdktp.Shutdown) + + exporter = exp + tp = sdktp + return nil +} + +func TracerProvider() (trace.TracerProvider, error) { + once.Do(func() { + if err1 := detect(); err1 != nil { + err = err1 + } + }) + b, _ := strconv.ParseBool(os.Getenv("OTEL_IGNORE_ERROR")) + if err != nil && !b { + return nil, err + } + return tp, nil +} + +func Exporter() (sdktrace.SpanExporter, error) { + _, err := TracerProvider() + if err != nil { + return nil, err + } + return exporter, nil +} + +func Shutdown(ctx context.Context) error { + for _, c := range closers { + if err := c(ctx); err != nil { + return err + } + } + return nil +} + +type serviceNameDetector struct{} + +func (serviceNameDetector) Detect(ctx context.Context) (*resource.Resource, error) { + return resource.StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + if n := os.Getenv("OTEL_SERVICE_NAME"); n != "" { + return n, nil + } + if ServiceName != "" { + return ServiceName, nil + } + return filepath.Base(os.Args[0]), nil + }, + ).Detect(ctx) +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/detect/otlp.go b/vendor/github.com/moby/buildkit/util/tracing/detect/otlp.go new file mode 100644 index 0000000000..aa68f876ef --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/detect/otlp.go @@ -0,0 +1,45 @@ +package detect + +import ( + "context" + "os" + + "github.com/pkg/errors" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +func init() { + Register("otlp", otlpExporter, 10) +} + +func otlpExporter() (sdktrace.SpanExporter, error) { + set := os.Getenv("OTEL_TRACES_EXPORTER") == "otlp" || 
os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT") != "" || os.Getenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT") != "" + if !set { + return nil, nil + } + + proto := os.Getenv("OTEL_EXPORTER_OTLP_TRACES_PROTOCOL") + if proto == "" { + proto = os.Getenv("OTEL_EXPORTER_OTLP_PROTOCOL") + } + if proto == "" { + proto = "grpc" + } + + var c otlptrace.Client + + switch proto { + case "grpc": + c = otlptracegrpc.NewClient() + case "http/protobuf": + c = otlptracehttp.NewClient() + // case "http/json": // unsupported by library + default: + return nil, errors.Errorf("unsupported otlp protocol %v", proto) + } + + return otlptrace.New(context.Background(), c) +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/detect/recorder.go b/vendor/github.com/moby/buildkit/util/tracing/detect/recorder.go new file mode 100644 index 0000000000..8ff7f1dcef --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/detect/recorder.go @@ -0,0 +1,115 @@ +package detect + +import ( + "context" + "sync" + "time" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + "go.opentelemetry.io/otel/trace" +) + +type TraceRecorder struct { + sdktrace.SpanExporter + + mu sync.Mutex + m map[trace.TraceID]*stubs + listeners map[trace.TraceID]int + flush func(context.Context) error +} + +type stubs struct { + spans []tracetest.SpanStub + last time.Time +} + +func NewTraceRecorder() *TraceRecorder { + tr := &TraceRecorder{ + m: map[trace.TraceID]*stubs{}, + listeners: map[trace.TraceID]int{}, + } + + go func() { + t := time.NewTimer(60 * time.Second) + for { + <-t.C + tr.gc() + t.Reset(50 * time.Second) + } + }() + + return tr +} + +func (r *TraceRecorder) Record(traceID trace.TraceID) func() []tracetest.SpanStub { + r.mu.Lock() + defer r.mu.Unlock() + + r.listeners[traceID]++ + var once sync.Once + var spans []tracetest.SpanStub + return func() []tracetest.SpanStub { + once.Do(func() { + if r.flush != nil { + r.flush(context.TODO()) + } + + r.mu.Lock() + defer r.mu.Unlock() + + if v, ok := r.m[traceID]; ok { + spans = v.spans + } + r.listeners[traceID]-- + if r.listeners[traceID] == 0 { + delete(r.listeners, traceID) + } + }) + return spans + } +} + +func (r *TraceRecorder) gc() { + r.mu.Lock() + defer r.mu.Unlock() + + now := time.Now() + for k, s := range r.m { + if _, ok := r.listeners[k]; ok { + continue + } + if now.Sub(s.last) > 60*time.Second { + delete(r.m, k) + } + } +} + +func (r *TraceRecorder) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { + r.mu.Lock() + + now := time.Now() + for _, s := range spans { + ss := tracetest.SpanStubFromReadOnlySpan(s) + v, ok := r.m[ss.SpanContext.TraceID()] + if !ok { + v = &stubs{} + r.m[s.SpanContext().TraceID()] = v + } + v.last = now + v.spans = append(v.spans, ss) + } + r.mu.Unlock() + + if r.SpanExporter == nil { + return nil + } + return r.SpanExporter.ExportSpans(ctx, spans) +} + +func (r *TraceRecorder) Shutdown(ctx context.Context) error { + if r.SpanExporter == nil { + return nil + } + return r.SpanExporter.Shutdown(ctx) +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/detect/threadsafe.go b/vendor/github.com/moby/buildkit/util/tracing/detect/threadsafe.go new file mode 100644 index 0000000000..51d14448df --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/detect/threadsafe.go @@ -0,0 +1,26 @@ +package detect + +import ( + "context" + "sync" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +// threadSafeExporterWrapper wraps an OpenTelemetry SpanExporter and makes it 
thread-safe. +type threadSafeExporterWrapper struct { + mu sync.Mutex + exporter sdktrace.SpanExporter +} + +func (tse *threadSafeExporterWrapper) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { + tse.mu.Lock() + defer tse.mu.Unlock() + return tse.exporter.ExportSpans(ctx, spans) +} + +func (tse *threadSafeExporterWrapper) Shutdown(ctx context.Context) error { + tse.mu.Lock() + defer tse.mu.Unlock() + return tse.exporter.Shutdown(ctx) +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go b/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go index 638b08ce90..e8d13301f3 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go +++ b/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go @@ -16,17 +16,14 @@ package otlptracegrpc import ( "context" - "errors" - "fmt" "sync" "time" + "github.com/pkg/errors" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" - - "google.golang.org/grpc" - coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" tracepb "go.opentelemetry.io/proto/otlp/trace/v1" + "google.golang.org/grpc" ) type client struct { @@ -38,10 +35,6 @@ type client struct { var _ otlptrace.Client = (*client)(nil) -var ( - errNoClient = errors.New("no client") -) - // NewClient creates a new gRPC trace client. func NewClient(cc *grpc.ClientConn) otlptrace.Client { c := &client{} @@ -73,7 +66,7 @@ func (c *client) Stop(ctx context.Context) error { // UploadTraces sends a batch of spans to the collector. func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { if !c.connection.Connected() { - return fmt.Errorf("traces exporter is disconnected from the server: %w", c.connection.LastConnectError()) + return errors.Wrap(c.connection.LastConnectError(), "traces exporter is disconnected from the server") } ctx, cancel := c.connection.ContextWithStop(ctx) diff --git a/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/connection.go b/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/connection.go index a244882197..dbb0fcd39f 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/connection.go +++ b/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/connection.go @@ -119,9 +119,7 @@ func (c *Connection) indefiniteBackgroundConnection() { connReattemptPeriod := defaultConnReattemptPeriod - // No strong seeding required, nano time can - // already help with pseudo uniqueness. - rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024))) + rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024))) //nolint:gosec // No strong seeding required, nano time can already help with pseudo uniqueness. 
// maxJitterNanos: 70% of the connectionReattemptPeriod maxJitterNanos := int64(0.7 * float64(connReattemptPeriod)) diff --git a/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/errors.go b/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/errors.go new file mode 100644 index 0000000000..b05bd02a29 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/errors.go @@ -0,0 +1,7 @@ +package otlptracegrpc + +import "errors" + +var ( + errNoClient = errors.New("no client") +) diff --git a/vendor/github.com/moby/buildkit/util/wildcard/wildcard.go b/vendor/github.com/moby/buildkit/util/wildcard/wildcard.go new file mode 100644 index 0000000000..ef1176c82e --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/wildcard/wildcard.go @@ -0,0 +1,87 @@ +package wildcard + +import ( + "regexp" + "strings" + + "github.com/pkg/errors" +) + +// New returns a wildcard object for a string that contains "*" symbols. +func New(s string) (*Wildcard, error) { + reStr, err := Wildcard2Regexp(s) + if err != nil { + return nil, errors.Wrapf(err, "failed to translate wildcard %q to regexp", s) + } + re, err := regexp.Compile(reStr) + if err != nil { + return nil, errors.Wrapf(err, "failed to compile regexp %q (translated from wildcard %q)", reStr, s) + } + w := &Wildcard{ + orig: s, + re: re, + } + return w, nil +} + +// Wildcard2Regexp translates a wildcard string to a regexp string. +func Wildcard2Regexp(wildcard string) (string, error) { + s := regexp.QuoteMeta(wildcard) + if strings.Contains(s, "\\*\\*") { + return "", errors.New("invalid wildcard: \"**\"") + } + s = strings.ReplaceAll(s, "\\*", "(.*)") + s = "^" + s + "$" + return s, nil +} + +// Wildcard is a wildcard matcher object. +type Wildcard struct { + orig string + re *regexp.Regexp +} + +// String implements fmt.Stringer. +func (w *Wildcard) String() string { + return w.orig +} + +// Match returns a non-nil Match on match. +func (w *Wildcard) Match(q string) *Match { + submatches := w.re.FindStringSubmatch(q) + if len(submatches) == 0 { + return nil + } + m := &Match{ + w: w, + Submatches: submatches, + // FIXME: avoid executing regexp twice + idx: w.re.FindStringSubmatchIndex(q), + } + return m +} + +// Match is a matched result. +type Match struct { + w *Wildcard + Submatches []string // 0: the entire query, 1: the first submatch, 2: the second submatch, ... + idx []int +} + +// String implements fmt.Stringer. +func (m *Match) String() string { + if len(m.Submatches) == 0 { + return "" + } + return m.Submatches[0] +} + +// Format formats submatch strings like "$1", "$2". +func (m *Match) Format(f string) (string, error) { + if m.w == nil || len(m.Submatches) == 0 || len(m.idx) == 0 { + return "", errors.New("invalid state") + } + var b []byte + b = m.w.re.ExpandString(b, f, m.Submatches[0], m.idx) + return string(b), nil +} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/applier.go b/vendor/github.com/moby/buildkit/util/winlayers/applier.go index c9c76b27df..f2b147d674 100644 --- a/vendor/github.com/moby/buildkit/util/winlayers/applier.go +++ b/vendor/github.com/moby/buildkit/util/winlayers/applier.go @@ -4,7 +4,6 @@ import ( "archive/tar" "context" "io" - "io/ioutil" "runtime" "strings" "sync" @@ -39,7 +38,7 @@ type winApplier struct { func (s *winApplier) Apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { if !hasWindowsLayerMode(ctx) { - return s.a.Apply(ctx, desc, mounts, opts...) 
+ return s.apply(ctx, desc, mounts, opts...) } compressed, err := images.DiffCompression(ctx, desc.MediaType) @@ -87,7 +86,7 @@ func (s *winApplier) Apply(ctx context.Context, desc ocispecs.Descriptor, mounts } // Read any trailing data - if _, err := io.Copy(ioutil.Discard, rc); err != nil { + if _, err := io.Copy(io.Discard, rc); err != nil { discard(err) return err } @@ -138,13 +137,15 @@ func filter(in io.Reader, f func(*tar.Header) bool) (io.Reader, func(error)) { return err } if h.Size > 0 { + //nolint:gosec // never read into memory if _, err := io.Copy(tarWriter, tarReader); err != nil { return err } } } else { if h.Size > 0 { - if _, err := io.Copy(ioutil.Discard, tarReader); err != nil { + //nolint:gosec // never read into memory + if _, err := io.Copy(io.Discard, tarReader); err != nil { return err } } diff --git a/vendor/github.com/moby/buildkit/util/winlayers/apply.go b/vendor/github.com/moby/buildkit/util/winlayers/apply.go new file mode 100644 index 0000000000..20b2faa038 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/winlayers/apply.go @@ -0,0 +1,16 @@ +//go:build !nydus +// +build !nydus + +package winlayers + +import ( + "context" + + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/mount" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func (s *winApplier) apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { + return s.a.Apply(ctx, desc, mounts, opts...) +} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/apply_nydus.go b/vendor/github.com/moby/buildkit/util/winlayers/apply_nydus.go new file mode 100644 index 0000000000..1ef61b5bca --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/winlayers/apply_nydus.go @@ -0,0 +1,73 @@ +//go:build nydus +// +build nydus + +package winlayers + +import ( + "context" + "io" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + nydusify "github.com/containerd/nydus-snapshotter/pkg/converter" +) + +func isNydusBlob(ctx context.Context, desc ocispecs.Descriptor) bool { + if desc.Annotations == nil { + return false + } + + hasMediaType := desc.MediaType == nydusify.MediaTypeNydusBlob + _, hasAnno := desc.Annotations[nydusify.LayerAnnotationNydusBlob] + return hasMediaType && hasAnno +} + +func (s *winApplier) apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) { + if !isNydusBlob(ctx, desc) { + return s.a.Apply(ctx, desc, mounts, opts...) 
+ } + + var ocidesc ocispecs.Descriptor + if err := mount.WithTempMount(ctx, mounts, func(root string) error { + ra, err := s.cs.ReaderAt(ctx, desc) + if err != nil { + return errors.Wrap(err, "get reader from content store") + } + defer ra.Close() + + pr, pw := io.Pipe() + go func() { + defer pw.Close() + if err := nydusify.Unpack(ctx, ra, pw, nydusify.UnpackOption{}); err != nil { + pw.CloseWithError(errors.Wrap(err, "unpack nydus blob")) + } + }() + defer pr.Close() + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(pr, digester.Hash()), + } + + if _, err := archive.Apply(ctx, root, rc); err != nil { + return errors.Wrap(err, "apply nydus blob") + } + + ocidesc = ocispecs.Descriptor{ + MediaType: ocispecs.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + } + + return nil + }); err != nil { + return ocispecs.Descriptor{}, err + } + + return ocidesc, nil +} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/context.go b/vendor/github.com/moby/buildkit/util/winlayers/context.go index c0bd3f8a2f..e4608892ae 100644 --- a/vendor/github.com/moby/buildkit/util/winlayers/context.go +++ b/vendor/github.com/moby/buildkit/util/winlayers/context.go @@ -12,8 +12,5 @@ func UseWindowsLayerMode(ctx context.Context) context.Context { func hasWindowsLayerMode(ctx context.Context) bool { v := ctx.Value(contextKey) - if v == nil { - return false - } - return true + return v != nil } diff --git a/vendor/github.com/moby/buildkit/util/winlayers/differ.go b/vendor/github.com/moby/buildkit/util/winlayers/differ.go index fc8ba7f7e7..fe2b1c2161 100644 --- a/vendor/github.com/moby/buildkit/util/winlayers/differ.go +++ b/vendor/github.com/moby/buildkit/util/winlayers/differ.go @@ -250,6 +250,7 @@ func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { return err } if h.Size > 0 { + //nolint:gosec // never read into memory if _, err := io.Copy(tarWriter, tarReader); err != nil { return err } @@ -262,7 +263,6 @@ func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { } pw.CloseWithError(err) done <- err - return }() discard := func(err error) { diff --git a/vendor/github.com/moby/buildkit/worker/result.go b/vendor/github.com/moby/buildkit/worker/result.go index 5691c630f6..26054cf8c2 100644 --- a/vendor/github.com/moby/buildkit/worker/result.go +++ b/vendor/github.com/moby/buildkit/worker/result.go @@ -26,6 +26,13 @@ func (wr *WorkerRef) ID() string { return wr.Worker.ID() + "::" + refID } +func (wr *WorkerRef) Release(ctx context.Context) error { + if wr.ImmutableRef == nil { + return nil + } + return wr.ImmutableRef.Release(ctx) +} + // GetRemotes method abstracts ImmutableRef's GetRemotes to allow a Worker to override. // This is needed for moby integration. // Use this method instead of calling ImmutableRef.GetRemotes() directly. 
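
The `worker/result.go` hunk above makes `WorkerRef.Release` nil-safe: it simply returns `nil` when no `ImmutableRef` is attached. A minimal caller sketch of how that guard is meant to be consumed; the `ReleaseAll` helper below is hypothetical and not part of this patch:

```go
package cleanup

import (
	"context"
	"log"

	"github.com/moby/buildkit/worker"
)

// ReleaseAll is a hypothetical helper showing why the nil guard inside
// WorkerRef.Release matters: callers can release every result
// unconditionally, without first checking wr.ImmutableRef != nil.
func ReleaseAll(ctx context.Context, refs []*worker.WorkerRef) {
	for _, wr := range refs {
		if err := wr.Release(ctx); err != nil { // safe even for refs without an ImmutableRef
			log.Printf("failed to release worker ref: %v", err)
		}
	}
}
```
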
diff --git a/vendor/github.com/moby/buildkit/worker/worker.go b/vendor/github.com/moby/buildkit/worker/worker.go index 86521c5bab..2f426e9ead 100644 --- a/vendor/github.com/moby/buildkit/worker/worker.go +++ b/vendor/github.com/moby/buildkit/worker/worker.go @@ -2,8 +2,10 @@ package worker import ( "context" + "io" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/leases" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" @@ -17,10 +19,12 @@ import ( ) type Worker interface { + io.Closer // ID needs to be unique in the cluster ID() string Labels() map[string]string Platforms(noCache bool) []ocispecs.Platform + BuildkitVersion() client.BuildkitVersion GCPolicy() []client.PruneInfo LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) @@ -35,6 +39,7 @@ type Worker interface { ContentStore() content.Store Executor() executor.Executor CacheManager() cache.Manager + LeaseManager() leases.Manager } type Infos interface { diff --git a/vendor/github.com/moby/buildkit/worker/workercontroller.go b/vendor/github.com/moby/buildkit/worker/workercontroller.go index 26ca945923..e175b4002b 100644 --- a/vendor/github.com/moby/buildkit/worker/workercontroller.go +++ b/vendor/github.com/moby/buildkit/worker/workercontroller.go @@ -2,6 +2,7 @@ package worker import ( "github.com/containerd/containerd/filters" + "github.com/hashicorp/go-multierror" "github.com/moby/buildkit/client" "github.com/pkg/errors" ) @@ -13,6 +14,16 @@ type Controller struct { workers []Worker } +func (c *Controller) Close() error { + var rerr error + for _, w := range c.workers { + if err := w.Close(); err != nil { + rerr = multierror.Append(rerr, err) + } + } + return rerr +} + // Add adds a local worker. // The first worker becomes the default. 
// @@ -62,9 +73,10 @@ func (c *Controller) WorkerInfos() []client.WorkerInfo { out := make([]client.WorkerInfo, 0, len(c.workers)) for _, w := range c.workers { out = append(out, client.WorkerInfo{ - ID: w.ID(), - Labels: w.Labels(), - Platforms: w.Platforms(false), + ID: w.ID(), + Labels: w.Labels(), + Platforms: w.Platforms(false), + BuildkitVersion: w.BuildkitVersion(), }) } return out diff --git a/vendor/github.com/package-url/packageurl-go/.gitignore b/vendor/github.com/package-url/packageurl-go/.gitignore new file mode 100644 index 0000000000..a1338d6851 --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/.gitignore @@ -0,0 +1,14 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/package-url/packageurl-go/.golangci.yaml b/vendor/github.com/package-url/packageurl-go/.golangci.yaml new file mode 100644 index 0000000000..73a5741c92 --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/.golangci.yaml @@ -0,0 +1,17 @@ +# individual linter configs go here +linters-settings: + +# default linters are enabled `golangci-lint help linters` +linters: + disable-all: true + enable: + - deadcode + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - structcheck + - typecheck + - unused + - varcheck \ No newline at end of file diff --git a/vendor/github.com/package-url/packageurl-go/LICENSE b/vendor/github.com/package-url/packageurl-go/LICENSE new file mode 100644 index 0000000000..0b5633b5de --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) the purl authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/package-url/packageurl-go/Makefile b/vendor/github.com/package-url/packageurl-go/Makefile new file mode 100644 index 0000000000..f6e71425f7 --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/Makefile @@ -0,0 +1,12 @@ +.PHONY: test clean lint + +test: + curl -L https://raw.githubusercontent.com/package-url/purl-spec/master/test-suite-data.json -o testdata/test-suite-data.json + go test -v -cover ./... + +clean: + find . 
-name "test-suite-data.json" | xargs rm -f + +lint: + go get -u golang.org/x/lint/golint + golint -set_exit_status diff --git a/vendor/github.com/package-url/packageurl-go/README.md b/vendor/github.com/package-url/packageurl-go/README.md new file mode 100644 index 0000000000..783985498b --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/README.md @@ -0,0 +1,74 @@ +# packageurl-go + +[![build](https://github.com/package-url/packageurl-go/workflows/test/badge.svg)](https://github.com/package-url/packageurl-go/actions?query=workflow%3Atest) [![Coverage Status](https://coveralls.io/repos/github/package-url/packageurl-go/badge.svg)](https://coveralls.io/github/package-url/packageurl-go) [![PkgGoDev](https://pkg.go.dev/badge/github.com/package-url/packageurl-go)](https://pkg.go.dev/github.com/package-url/packageurl-go) [![Go Report Card](https://goreportcard.com/badge/github.com/package-url/packageurl-go)](https://goreportcard.com/report/github.com/package-url/packageurl-go) + +Go implementation of the package url spec. + + +## Install +``` +go get -u github.com/package-url/packageurl-go +``` + +## Versioning + +The versions will follow the spec. So if the spec is released at ``1.0``. Then all versions in the ``1.x.y`` will follow the ``1.x`` spec. + + +## Usage + +### Create from parts +```go +package main + +import ( + "fmt" + + "github.com/package-url/packageurl-go" +) + +func main() { + instance := packageurl.NewPackageURL("test", "ok", "name", "version", nil, "") + fmt.Printf("%s", instance.ToString()) +} +``` + +### Parse from string +```go +package main + +import ( + "fmt" + + "github.com/package-url/packageurl-go" +) + +func main() { + instance, err := packageurl.FromString("test:ok/name@version") + if err != nil { + panic(err) + } + fmt.Printf("%#v", instance) +} + +``` + + +## Test +Testing using the normal ``go test`` command. Using ``make test`` will pull the test fixtures shared between all package-url projects and then execute the tests. + +``` +$ make test +curl -L https://raw.githubusercontent.com/package-url/purl-test-suite/master/test-suite-data.json -o testdata/test-suite-data.json + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 7181 100 7181 0 0 1202 0 0:00:05 0:00:05 --:--:-- 1611 +go test -v -cover ./... 
+=== RUN TestFromStringExamples +--- PASS: TestFromStringExamples (0.00s) +=== RUN TestToStringExamples +--- PASS: TestToStringExamples (0.00s) +PASS +coverage: 94.7% of statements +ok github.com/package-url/packageurl-go 0.002s +``` diff --git a/vendor/github.com/package-url/packageurl-go/VERSION b/vendor/github.com/package-url/packageurl-go/VERSION new file mode 100644 index 0000000000..77d6f4ca23 --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/VERSION @@ -0,0 +1 @@ +0.0.0 diff --git a/vendor/github.com/package-url/packageurl-go/packageurl.go b/vendor/github.com/package-url/packageurl-go/packageurl.go new file mode 100644 index 0000000000..3cba7095d5 --- /dev/null +++ b/vendor/github.com/package-url/packageurl-go/packageurl.go @@ -0,0 +1,402 @@ +/* +Copyright (c) the purl authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Package packageurl implements the package-url spec +package packageurl + +import ( + "errors" + "fmt" + "net/url" + "regexp" + "sort" + "strings" +) + +var ( + // QualifierKeyPattern describes a valid qualifier key: + // + // - The key must be composed only of ASCII letters and numbers, '.', + // '-' and '_' (period, dash and underscore). + // - A key cannot start with a number. + QualifierKeyPattern = regexp.MustCompile(`^[A-Za-z\.\-_][0-9A-Za-z\.\-_]*$`) +) + +// These are the known purl types as defined in the spec. Some of these require +// special treatment during parsing. +// https://github.com/package-url/purl-spec#known-purl-types +var ( + // TypeBitbucket is a pkg:bitbucket purl. + TypeBitbucket = "bitbucket" + // TypeCocoapods is a pkg:cocoapods purl. + TypeCocoapods = "cocoapods" + // TypeCargo is a pkg:cargo purl. + TypeCargo = "cargo" + // TypeComposer is a pkg:composer purl. + TypeComposer = "composer" + // TypeConan is a pkg:conan purl. + TypeConan = "conan" + // TypeConda is a pkg:conda purl. + TypeConda = "conda" + // TypeCran is a pkg:cran purl. + TypeCran = "cran" + // TypeDebian is a pkg:deb purl. + TypeDebian = "deb" + // TypeDocker is a pkg:docker purl. + TypeDocker = "docker" + // TypeGem is a pkg:gem purl. + TypeGem = "gem" + // TypeGeneric is a pkg:generic purl. + TypeGeneric = "generic" + // TypeGithub is a pkg:github purl. + TypeGithub = "github" + // TypeGolang is a pkg:golang purl. + TypeGolang = "golang" + // TypeHackage is a pkg:hackage purl. + TypeHackage = "hackage" + // TypeHex is a pkg:hex purl. + TypeHex = "hex" + // TypeMaven is a pkg:maven purl. + TypeMaven = "maven" + // TypeNPM is a pkg:npm purl. 
+ TypeNPM = "npm" + // TypeNuget is a pkg:nuget purl. + TypeNuget = "nuget" + // TypeOCI is a pkg:oci purl + TypeOCI = "oci" + // TypePyPi is a pkg:pypi purl. + TypePyPi = "pypi" + // TypeRPM is a pkg:rpm purl. + TypeRPM = "rpm" + // TypeSwift is pkg:swift purl + TypeSwift = "swift" +) + +// Qualifier represents a single key=value qualifier in the package url +type Qualifier struct { + Key string + Value string +} + +func (q Qualifier) String() string { + // A value must be a percent-encoded string + return fmt.Sprintf("%s=%s", q.Key, url.PathEscape(q.Value)) +} + +// Qualifiers is a slice of key=value pairs, with order preserved as it appears +// in the package URL. +type Qualifiers []Qualifier + +// QualifiersFromMap constructs a Qualifiers slice from a string map. To get a +// deterministic qualifier order (despite maps not providing any iteration order +// guarantees) the returned Qualifiers are sorted in increasing order of key. +func QualifiersFromMap(mm map[string]string) Qualifiers { + q := Qualifiers{} + + for k, v := range mm { + q = append(q, Qualifier{Key: k, Value: v}) + } + + // sort for deterministic qualifier order + sort.Slice(q, func(i int, j int) bool { return q[i].Key < q[j].Key }) + + return q +} + +// Map converts a Qualifiers struct to a string map. +func (qq Qualifiers) Map() map[string]string { + m := make(map[string]string) + + for i := 0; i < len(qq); i++ { + k := qq[i].Key + v := qq[i].Value + m[k] = v + } + + return m +} + +func (qq Qualifiers) String() string { + var kvPairs []string + for _, q := range qq { + kvPairs = append(kvPairs, q.String()) + } + return strings.Join(kvPairs, "&") +} + +// PackageURL is the struct representation of the parts that make a package url +type PackageURL struct { + Type string + Namespace string + Name string + Version string + Qualifiers Qualifiers + Subpath string +} + +// NewPackageURL creates a new PackageURL struct instance based on input +func NewPackageURL(purlType, namespace, name, version string, + qualifiers Qualifiers, subpath string) *PackageURL { + + return &PackageURL{ + Type: purlType, + Namespace: namespace, + Name: name, + Version: version, + Qualifiers: qualifiers, + Subpath: subpath, + } +} + +// ToString returns the human-readable instance of the PackageURL structure. +// This is the literal purl as defined by the spec. +func (p *PackageURL) ToString() string { + // Start with the type and a colon + purl := fmt.Sprintf("pkg:%s/", p.Type) + // Add namespaces if provided + if p.Namespace != "" { + var ns []string + for _, item := range strings.Split(p.Namespace, "/") { + ns = append(ns, url.QueryEscape(item)) + } + purl = purl + strings.Join(ns, "/") + "/" + } + // The name is always required and must be a percent-encoded string + // Use url.QueryEscape instead of PathEscape, as it handles @ signs + purl = purl + url.QueryEscape(p.Name) + // If a version is provided, add it after the at symbol + if p.Version != "" { + // A name must be a percent-encoded string + purl = purl + "@" + url.PathEscape(p.Version) + } + + // Iterate over qualifiers and make groups of key=value + var qualifiers []string + for _, q := range p.Qualifiers { + qualifiers = append(qualifiers, q.String()) + } + // If there are one or more key=value pairs, append on the package url + if len(qualifiers) != 0 { + purl = purl + "?" 
+ strings.Join(qualifiers, "&") + } + // Add a subpath if available + if p.Subpath != "" { + purl = purl + "#" + p.Subpath + } + return purl +} + +func (p PackageURL) String() string { + return p.ToString() +} + +// FromString parses a valid package url string into a PackageURL structure +func FromString(purl string) (PackageURL, error) { + initialIndex := strings.Index(purl, "#") + // Start with purl being stored in the remainder + remainder := purl + substring := "" + if initialIndex != -1 { + initialSplit := strings.SplitN(purl, "#", 2) + remainder = initialSplit[0] + rightSide := initialSplit[1] + rightSide = strings.TrimLeft(rightSide, "/") + rightSide = strings.TrimRight(rightSide, "/") + var rightSides []string + + for _, item := range strings.Split(rightSide, "/") { + item = strings.Replace(item, ".", "", -1) + item = strings.Replace(item, "..", "", -1) + if item != "" { + i, err := url.PathUnescape(item) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape path: %s", err) + } + rightSides = append(rightSides, i) + } + } + substring = strings.Join(rightSides, "/") + } + qualifiers := Qualifiers{} + index := strings.LastIndex(remainder, "?") + // If we don't have anything to split then return an empty result + if index != -1 { + qualifier := remainder[index+1:] + for _, item := range strings.Split(qualifier, "&") { + kv := strings.Split(item, "=") + key := strings.ToLower(kv[0]) + key, err := url.PathUnescape(key) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape qualifier key: %s", err) + } + if !validQualifierKey(key) { + return PackageURL{}, fmt.Errorf("invalid qualifier key: '%s'", key) + } + // TODO + // - If the `key` is `checksums`, split the `value` on ',' to create + // a list of `checksums` + if kv[1] == "" { + continue + } + value, err := url.PathUnescape(kv[1]) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape qualifier value: %s", err) + } + qualifiers = append(qualifiers, Qualifier{key, value}) + } + remainder = remainder[:index] + } + + nextSplit := strings.SplitN(remainder, ":", 2) + if len(nextSplit) != 2 || nextSplit[0] != "pkg" { + return PackageURL{}, errors.New("scheme is missing") + } + // leading slashes after pkg: are to be ignored (pkg://maven is + // equivalent to pkg:maven) + remainder = strings.TrimLeft(nextSplit[1], "/") + + nextSplit = strings.SplitN(remainder, "/", 2) + if len(nextSplit) != 2 { + return PackageURL{}, errors.New("type is missing") + } + // purl type is case-insensitive, canonical form is lower-case + purlType := strings.ToLower(nextSplit[0]) + remainder = nextSplit[1] + + index = strings.LastIndex(remainder, "/") + name := typeAdjustName(purlType, remainder[index+1:]) + version := "" + + atIndex := strings.Index(name, "@") + if atIndex != -1 { + v, err := url.PathUnescape(name[atIndex+1:]) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape purl version: %s", err) + } + version = v + + unecapeName, err := url.PathUnescape(name[:atIndex]) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape purl name: %s", err) + } + name = unecapeName + } + var namespaces []string + + if index != -1 { + remainder = remainder[:index] + + for _, item := range strings.Split(remainder, "/") { + if item != "" { + unescaped, err := url.PathUnescape(item) + if err != nil { + return PackageURL{}, fmt.Errorf("failed to unescape path: %s", err) + } + namespaces = append(namespaces, unescaped) + } + } + } + namespace := strings.Join(namespaces, "/") + namespace = 
typeAdjustNamespace(purlType, namespace) + + // Fail if name is empty at this point + if name == "" { + return PackageURL{}, errors.New("name is required") + } + + err := validCustomRules(purlType, name, namespace, version, qualifiers) + if err != nil { + return PackageURL{}, err + } + + return PackageURL{ + Type: purlType, + Namespace: namespace, + Name: name, + Version: version, + Qualifiers: qualifiers, + Subpath: substring, + }, nil +} + +// Make any purl type-specific adjustments to the parsed namespace. +// See https://github.com/package-url/purl-spec#known-purl-types +func typeAdjustNamespace(purlType, ns string) string { + switch purlType { + case TypeBitbucket, TypeDebian, TypeGithub, TypeGolang, TypeNPM, TypeRPM: + return strings.ToLower(ns) + } + return ns +} + +// Make any purl type-specific adjustments to the parsed name. +// See https://github.com/package-url/purl-spec#known-purl-types +func typeAdjustName(purlType, name string) string { + switch purlType { + case TypeBitbucket, TypeDebian, TypeGithub, TypeGolang, TypeNPM: + return strings.ToLower(name) + case TypePyPi: + return strings.ToLower(strings.ReplaceAll(name, "_", "-")) + } + return name +} + +// validQualifierKey validates a qualifierKey against our QualifierKeyPattern. +func validQualifierKey(key string) bool { + return QualifierKeyPattern.MatchString(key) +} + +// validCustomRules evaluates additional rules for each package url type, as specified in the package-url specification. +// On success, it returns nil. On failure, a descriptive error will be returned. +func validCustomRules(purlType, name, ns, version string, qualifiers Qualifiers) error { + q := qualifiers.Map() + switch purlType { + case TypeConan: + if ns != "" { + if val, ok := q["channel"]; ok { + if val == "" { + return errors.New("the qualifier channel must be not empty if namespace is present") + } + } else { + return errors.New("channel qualifier does not exist") + } + } else { + if val, ok := q["channel"]; ok { + if val != "" { + return errors.New("namespace is required if channel is non empty") + } + } + } + case TypeSwift: + if ns == "" { + return errors.New("namespace is required") + } + if version == "" { + return errors.New("version is required") + } + case TypeCran: + if version == "" { + return errors.New("version is required") + } + } + return nil +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE b/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE new file mode 100644 index 0000000000..e51324f9b5 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2021 NYU Secure Systems Lab + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go new file mode 100644 index 0000000000..fb1d5918b2 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go @@ -0,0 +1,145 @@ +package cjson + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "regexp" + "sort" +) + +/* +encodeCanonicalString is a helper function to canonicalize the passed string +according to the OLPC canonical JSON specification for strings (see +http://wiki.laptop.org/go/Canonical_JSON). String canonicalization consists of +escaping backslashes ("\") and double quotes (") and wrapping the resulting +string in double quotes ("). +*/ +func encodeCanonicalString(s string) string { + re := regexp.MustCompile(`([\"\\])`) + return fmt.Sprintf("\"%s\"", re.ReplaceAllString(s, "\\$1")) +} + +/* +encodeCanonical is a helper function to recursively canonicalize the passed +object according to the OLPC canonical JSON specification (see +http://wiki.laptop.org/go/Canonical_JSON) and write it to the passed +*bytes.Buffer. If canonicalization fails it returns an error. +*/ +func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) { + // Since this function is called recursively, we use panic if an error occurs + // and recover in a deferred function, which is always called before + // returning. There we set the error that is returned eventually. + defer func() { + if r := recover(); r != nil { + err = errors.New(r.(string)) + } + }() + + switch objAsserted := obj.(type) { + case string: + result.WriteString(encodeCanonicalString(objAsserted)) + + case bool: + if objAsserted { + result.WriteString("true") + } else { + result.WriteString("false") + } + + // The wrapping `EncodeCanonical` function decodes the passed json data with + // `decoder.UseNumber` so that any numeric value is stored as `json.Number` + // (instead of the default `float64`). This allows us to assert that it is a + // non-floating point number, which are the only numbers allowed by the used + // canonicalization specification. + case json.Number: + if _, err := objAsserted.Int64(); err != nil { + panic(fmt.Sprintf("Can't canonicalize floating point number '%s'", + objAsserted)) + } + result.WriteString(objAsserted.String()) + + case nil: + result.WriteString("null") + + // Canonicalize slice + case []interface{}: + result.WriteString("[") + for i, val := range objAsserted { + if err := encodeCanonical(val, result); err != nil { + return err + } + if i < (len(objAsserted) - 1) { + result.WriteString(",") + } + } + result.WriteString("]") + + case map[string]interface{}: + result.WriteString("{") + + // Make a list of keys + var mapKeys []string + for key := range objAsserted { + mapKeys = append(mapKeys, key) + } + // Sort keys + sort.Strings(mapKeys) + + // Canonicalize map + for i, key := range mapKeys { + // Note: `key` must be a `string` (see `case map[string]interface{}`) and + // canonicalization of strings cannot err out (see `case string`), thus + // no error handling is needed here. 
+ encodeCanonical(key, result) + + result.WriteString(":") + if err := encodeCanonical(objAsserted[key], result); err != nil { + return err + } + if i < (len(mapKeys) - 1) { + result.WriteString(",") + } + i++ + } + result.WriteString("}") + + default: + // We recover in a deferred function defined above + panic(fmt.Sprintf("Can't canonicalize '%s' of type '%s'", + objAsserted, reflect.TypeOf(objAsserted))) + } + return nil +} + +/* +EncodeCanonical JSON canonicalizes the passed object and returns it as a byte +slice. It uses the OLPC canonical JSON specification (see +http://wiki.laptop.org/go/Canonical_JSON). If canonicalization fails the byte +slice is nil and the second return value contains the error. +*/ +func EncodeCanonical(obj interface{}) ([]byte, error) { + // FIXME: Terrible hack to turn the passed struct into a map, converting + // the struct's variable names to the json key names defined in the struct + data, err := json.Marshal(obj) + if err != nil { + return nil, err + } + var jsonMap interface{} + + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + if err := dec.Decode(&jsonMap); err != nil { + return nil, err + } + + // Create a buffer and write the canonicalized JSON bytes to it + var result bytes.Buffer + if err := encodeCanonical(jsonMap, &result); err != nil { + return nil, err + } + + return result.Bytes(), nil +} diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go new file mode 100644 index 0000000000..3dc05a4294 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go @@ -0,0 +1,197 @@ +/* +Package dsse implements the Dead Simple Signing Envelope (DSSE) +https://github.com/secure-systems-lab/dsse +*/ +package dsse + +import ( + "encoding/base64" + "errors" + "fmt" +) + +// ErrUnknownKey indicates that the implementation does not recognize the +// key. +var ErrUnknownKey = errors.New("unknown key") + +// ErrNoSignature indicates that an envelope did not contain any signatures. +var ErrNoSignature = errors.New("no signature found") + +// ErrNoSigners indicates that no signer was provided. +var ErrNoSigners = errors.New("no signers provided") + +/* +Envelope captures an envelope as described by the Secure Systems Lab +Signing Specification. See here: +https://github.com/secure-systems-lab/signing-spec/blob/master/envelope.md +*/ +type Envelope struct { + PayloadType string `json:"payloadType"` + Payload string `json:"payload"` + Signatures []Signature `json:"signatures"` +} + +/* +DecodeB64Payload returns the serialized body, decoded +from the envelope's payload field. A flexible +decoder is used, first trying standard base64, then +URL-encoded base64. +*/ +func (e *Envelope) DecodeB64Payload() ([]byte, error) { + return b64Decode(e.Payload) +} + +/* +Signature represents a generic in-toto signature that contains the identifier +of the key which was used to create the signature. +The used signature scheme has to be agreed upon by the signer and verifer +out of band. +The signature is a base64 encoding of the raw bytes from the signature +algorithm. 
+*/
+type Signature struct {
+	KeyID string `json:"keyid"`
+	Sig   string `json:"sig"`
+}
+
+/*
+PAE implements the DSSE Pre-Authentication Encoding
+https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition
+*/
+func PAE(payloadType string, payload []byte) []byte {
+	return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s",
+		len(payloadType), payloadType,
+		len(payload), payload))
+}
+
+/*
+Signer defines the interface for an abstract signing algorithm.
+The Signer interface is used to inject signature algorithm implementations
+into the EnvelopeSigner. This decoupling allows any signing algorithm
+and key management system to be used.
+The full message is provided as the parameter. If the signature algorithm
+depends on hashing of the message prior to signature calculation, the
+implementor of this interface must perform such hashing.
+The function must return raw bytes representing the calculated signature
+using the current algorithm, and the key used (if applicable).
+For an example see EcdsaSigner in sign_test.go.
+*/
+type Signer interface {
+	Sign(data []byte) ([]byte, error)
+	KeyID() (string, error)
+}
+
+// SignVerifier provides both the signing and verification interface.
+type SignVerifier interface {
+	Signer
+	Verifier
+}
+
+// EnvelopeSigner creates signed Envelopes.
+type EnvelopeSigner struct {
+	providers []SignVerifier
+	ev        *EnvelopeVerifier
+}
+
+/*
+NewEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer
+algorithms to sign the data.
+Creates a verifier with threshold=1; at least one of the providers must
+validate signatures successfully.
+*/
+func NewEnvelopeSigner(p ...SignVerifier) (*EnvelopeSigner, error) {
+	return NewMultiEnvelopeSigner(1, p...)
+}
+
+/*
+NewMultiEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer
+algorithms to sign the data.
+Creates a verifier with the given threshold: the number of providers
+that must validate the envelope.
+*/
+func NewMultiEnvelopeSigner(threshold int, p ...SignVerifier) (*EnvelopeSigner, error) {
+	var providers []SignVerifier
+
+	for _, sv := range p {
+		if sv != nil {
+			providers = append(providers, sv)
+		}
+	}
+
+	if len(providers) == 0 {
+		return nil, ErrNoSigners
+	}
+
+	evps := []Verifier{}
+	for _, p := range providers {
+		evps = append(evps, p.(Verifier))
+	}
+
+	ev, err := NewMultiEnvelopeVerifier(threshold, evps...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &EnvelopeSigner{
+		providers: providers,
+		ev:        ev,
+	}, nil
+}
+
+/*
+SignPayload signs a payload and payload type according to DSSE.
+Returned is an envelope as defined here:
+https://github.com/secure-systems-lab/dsse/blob/master/envelope.md
+One signature will be added for each Signer in the EnvelopeSigner.
+*/
+func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelope, error) {
+	var e = Envelope{
+		Payload:     base64.StdEncoding.EncodeToString(body),
+		PayloadType: payloadType,
+	}
+
+	paeEnc := PAE(payloadType, body)
+
+	for _, signer := range es.providers {
+		sig, err := signer.Sign(paeEnc)
+		if err != nil {
+			return nil, err
+		}
+		keyID, err := signer.KeyID()
+		if err != nil {
+			keyID = ""
+		}
+
+		e.Signatures = append(e.Signatures, Signature{
+			KeyID: keyID,
+			Sig:   base64.StdEncoding.EncodeToString(sig),
+		})
+	}
+
+	return &e, nil
+}
+
+/*
+Verify decodes the payload and verifies the signature.
+Any domain specific validation such as parsing the decoded body and
+validating the payload type is left out to the caller.
+Verify returns a list of accepted keys, each including the key ID, public
+key, and signature of the accepted provider keys.
+*/
+func (es *EnvelopeSigner) Verify(e *Envelope) ([]AcceptedKey, error) {
+	return es.ev.Verify(e)
+}
+
+/*
+Both standard and URL encoding are allowed:
+https://github.com/secure-systems-lab/dsse/blob/master/envelope.md
+*/
+func b64Decode(s string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(s)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return b, nil
+}
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go
new file mode 100644
index 0000000000..ead1c32ca8
--- /dev/null
+++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go
@@ -0,0 +1,146 @@
+package dsse
+
+import (
+	"crypto"
+	"errors"
+	"fmt"
+
+	"golang.org/x/crypto/ssh"
+)
+
+/*
+Verifier verifies a complete message against a signature and key.
+If the message was hashed prior to signature generation, the verifier
+must perform the same steps.
+If KeyID returns successfully, only signatures matching the key ID will be verified.
+*/
+type Verifier interface {
+	Verify(data, sig []byte) error
+	KeyID() (string, error)
+	Public() crypto.PublicKey
+}
+
+type EnvelopeVerifier struct {
+	providers []Verifier
+	threshold int
+}
+
+type AcceptedKey struct {
+	Public crypto.PublicKey
+	KeyID  string
+	Sig    Signature
+}
+
+func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) {
+	if e == nil {
+		return nil, errors.New("cannot verify a nil envelope")
+	}
+
+	if len(e.Signatures) == 0 {
+		return nil, ErrNoSignature
+	}
+
+	// Decode payload (i.e. the serialized body)
+	body, err := e.DecodeB64Payload()
+	if err != nil {
+		return nil, err
+	}
+	// Generate PAE(payloadtype, serialized body)
+	paeEnc := PAE(e.PayloadType, body)
+
+	// If *any* signature is found to be incorrect, it is skipped
+	var acceptedKeys []AcceptedKey
+	usedKeyids := make(map[string]string)
+	unverified_providers := ev.providers
+	for _, s := range e.Signatures {
+		sig, err := b64Decode(s.Sig)
+		if err != nil {
+			return nil, err
+		}
+
+		// Loop over the providers.
+		// If the provider and the signature both carry key IDs and they do not match, skip.
+		// If a provider recognizes the key, we exit
+		// the loop and use the result.
+		providers := unverified_providers
+		for i, v := range providers {
+			keyID, err := v.KeyID()
+
+			// Verifiers that do not provide a key ID get one generated from their public key.
+			if err != nil || keyID == "" {
+				keyID, err = SHA256KeyID(v.Public())
+				if err != nil {
+					keyID = ""
+				}
+			}
+
+			if s.KeyID != "" && keyID != "" && err == nil && s.KeyID != keyID {
+				continue
+			}
+
+			err = v.Verify(paeEnc, sig)
+			if err != nil {
+				continue
+			}
+
+			acceptedKey := AcceptedKey{
+				Public: v.Public(),
+				KeyID:  keyID,
+				Sig:    s,
+			}
+			unverified_providers = removeIndex(providers, i)
+
+			// See https://github.com/in-toto/in-toto/pull/251
+			if _, ok := usedKeyids[keyID]; ok {
+				fmt.Printf("Found envelope signed by different subkeys of the same main key; only one of them is counted towards the step threshold, KeyID=%s\n", keyID)
+				continue
+			}
+
+			usedKeyids[keyID] = ""
+			acceptedKeys = append(acceptedKeys, acceptedKey)
+			break
+		}
+	}
+
+	// Sanity check, in case this happens through some reflect magic.
+ if ev.threshold <= 0 || ev.threshold > len(ev.providers) { + return nil, errors.New("Invalid threshold") + } + + if len(usedKeyids) < ev.threshold { + return acceptedKeys, errors.New(fmt.Sprintf("Accepted signatures do not match threshold, Found: %d, Expected %d", len(acceptedKeys), ev.threshold)) + } + + return acceptedKeys, nil +} + +func NewEnvelopeVerifier(v ...Verifier) (*EnvelopeVerifier, error) { + return NewMultiEnvelopeVerifier(1, v...) +} + +func NewMultiEnvelopeVerifier(threshold int, p ...Verifier) (*EnvelopeVerifier, error) { + + if threshold <= 0 || threshold > len(p) { + return nil, errors.New("Invalid threshold") + } + + ev := EnvelopeVerifier{ + providers: p, + threshold: threshold, + } + return &ev, nil +} + +func SHA256KeyID(pub crypto.PublicKey) (string, error) { + // Generate public key fingerprint + sshpk, err := ssh.NewPublicKey(pub) + if err != nil { + return "", err + } + fingerprint := ssh.FingerprintSHA256(sshpk) + return fingerprint, nil +} + +func removeIndex(v []Verifier, index int) []Verifier { + return append(v[:index], v[index+1:]...) +} diff --git a/vendor/github.com/shibumi/go-pathspec/.gitignore b/vendor/github.com/shibumi/go-pathspec/.gitignore new file mode 100644 index 0000000000..3e32393f12 --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +# ignore .idea +.idea diff --git a/vendor/github.com/shibumi/go-pathspec/GO-LICENSE b/vendor/github.com/shibumi/go-pathspec/GO-LICENSE new file mode 100644 index 0000000000..7448756763 --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/GO-LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
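
The vendored dsse package above is driven through the `SignVerifier` interface (the union of `Signer` and `Verifier`), plus `NewEnvelopeSigner`, `SignPayload`, and `Verify`. A minimal end-to-end sketch using an in-memory ECDSA P-256 key; the `ecdsaSignVerifier` type, the `"example-key"` key ID, and the payload type are illustrative assumptions, not part of this patch:

```go
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"errors"
	"fmt"

	"github.com/secure-systems-lab/go-securesystemslib/dsse"
)

// ecdsaSignVerifier is an illustrative dsse.SignVerifier backed by an
// in-memory ECDSA P-256 key. It hashes with SHA-256 before signing, as
// the Signer contract leaves any hashing to the implementor.
type ecdsaSignVerifier struct {
	key *ecdsa.PrivateKey
}

func (s *ecdsaSignVerifier) Sign(data []byte) ([]byte, error) {
	h := sha256.Sum256(data)
	return ecdsa.SignASN1(rand.Reader, s.key, h[:])
}

func (s *ecdsaSignVerifier) Verify(data, sig []byte) error {
	h := sha256.Sum256(data)
	if !ecdsa.VerifyASN1(&s.key.PublicKey, h[:], sig) {
		return errors.New("ecdsa: signature mismatch")
	}
	return nil
}

// KeyID returns a fixed, illustrative identifier; returning ("", err)
// would make the verifier fall back to SHA256KeyID of the public key.
func (s *ecdsaSignVerifier) KeyID() (string, error) { return "example-key", nil }

func (s *ecdsaSignVerifier) Public() crypto.PublicKey { return &s.key.PublicKey }

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	signer, err := dsse.NewEnvelopeSigner(&ecdsaSignVerifier{key: key})
	if err != nil {
		panic(err)
	}

	// SignPayload base64-encodes the body and appends one signature per provider.
	env, err := signer.SignPayload("application/vnd.in-toto+json", []byte(`{"hello":"dsse"}`))
	if err != nil {
		panic(err)
	}

	// Verify checks each signature against the providers; with the default
	// threshold of 1, a single accepted key is sufficient.
	accepted, err := signer.Verify(env)
	if err != nil {
		panic(err)
	}
	fmt.Printf("envelope verified by %d key(s)\n", len(accepted))
}
```

Note that `Verify` counts distinct key IDs against the threshold (the `usedKeyids` bookkeeping above), so an envelope signed by several subkeys of the same key still counts only once.
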
diff --git a/vendor/github.com/shibumi/go-pathspec/LICENSE b/vendor/github.com/shibumi/go-pathspec/LICENSE new file mode 100644 index 0000000000..5c304d1a4a --- /dev/null +++ b/vendor/github.com/shibumi/go-pathspec/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/shibumi/go-pathspec/README.md b/vendor/github.com/shibumi/go-pathspec/README.md
new file mode 100644
index 0000000000..c146cf69b0
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/README.md
@@ -0,0 +1,45 @@
+# go-pathspec
+
+[![build](https://github.com/shibumi/go-pathspec/workflows/build/badge.svg)](https://github.com/shibumi/go-pathspec/actions?query=workflow%3Abuild) [![Coverage Status](https://coveralls.io/repos/github/shibumi/go-pathspec/badge.svg)](https://coveralls.io/github/shibumi/go-pathspec) [![PkgGoDev](https://pkg.go.dev/badge/github.com/shibumi/go-pathspec)](https://pkg.go.dev/github.com/shibumi/go-pathspec)
+
+go-pathspec implements gitignore-style pattern matching for paths.
+
+## Alternatives
+
+There are a few alternatives that try to be gitignore compatible, or even
+claim gitignore compatibility:
+
+### https://github.com/go-git/go-git
+
+go-git claims to be gitignore compatible, but it is missing a few special
+cases. This issue describes one of the patterns that does not work: https://github.com/go-git/go-git/issues/108
+
+What does not work is global filename pattern matching. Consider the following
+`.gitignore` file:
+
+```gitignore
+# gitignore test file
+parse.go
+```
+
+Then `parse.go` should match every file named `parse.go`, at any depth. You can
+test this with the following shell script:
+```shell
+mkdir -p /tmp/test/internal/util
+touch /tmp/test/internal/util/parse.go
+cd /tmp/test/
+git init
+echo "parse.go" > .gitignore
+```
+
+With git, `parse.go` will be excluded. The go-git implementation behaves differently.
+
+### https://github.com/monochromegane/go-gitignore
+
+monochromegane's go-gitignore does not support the `**` operator. This is
+also inconsistent with real gitignore behavior.
+
+## Authors
+
+Sander van Harmelen ()
+Christian Rebischke ()
diff --git a/vendor/github.com/shibumi/go-pathspec/gitignore.go b/vendor/github.com/shibumi/go-pathspec/gitignore.go
new file mode 100644
index 0000000000..2b08d4e8a5
--- /dev/null
+++ b/vendor/github.com/shibumi/go-pathspec/gitignore.go
@@ -0,0 +1,299 @@
+//
+// Copyright 2014, Sander van Harmelen
+// Copyright 2020, Christian Rebischke
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package pathspec implements git-compatible gitignore pattern matching.
+// See the description below if you are unfamiliar with it:
+//
+// A blank line matches no files, so it can serve as a separator for readability.
+//
+// A line starting with # serves as a comment. Put a backslash ("\") in front of
+// the first hash for patterns that begin with a hash.
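+//
+// For example (illustrative), the escaped pattern "\#build" matches a file
+// literally named "#build", whereas an unescaped "#build" line is a comment.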
+//
+// An optional prefix "!" which negates the pattern; any matching file excluded
+// by a previous pattern will become included again. If a negated pattern matches,
+// this will override lower-precedence pattern sources. Put a backslash ("\") in
+// front of the first "!" for patterns that begin with a literal "!", for example,
+// "\!important!.txt".
+//
+// If the pattern ends with a slash, it is removed for the purpose of the following
+// description, but it would only find a match with a directory. In other words,
+// foo/ will match a directory foo and paths underneath it, but will not match a
+// regular file or a symbolic link foo (this is consistent with the way pathspec
+// works in general in Git).
+//
+// If the pattern does not contain a slash /, Git treats it as a shell glob pattern
+// and checks for a match against the pathname relative to the location of the
+// .gitignore file (relative to the toplevel of the work tree if not from a
+// .gitignore file).
+//
+// Otherwise, Git treats the pattern as a shell glob suitable for consumption by
+// fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will not match
+// a / in the pathname. For example, "Documentation/*.html" matches
+// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or
+// "tools/perf/Documentation/perf.html".
+//
+// A leading slash matches the beginning of the pathname. For example, "/*.c"
+// matches "cat-file.c" but not "mozilla-sha1/sha1.c".
+//
+// Two consecutive asterisks ("**") in patterns matched against the full pathname
+// may have special meaning:
+//
+// A leading "**" followed by a slash means match in all directories. For example,
+// "**/foo" matches file or directory "foo" anywhere, the same as pattern "foo".
+// "**/foo/bar" matches file or directory "bar" anywhere that is directly under
+// directory "foo".
+//
+// A trailing "/" matches everything inside. For example, "abc/" matches all files
+// inside directory "abc", relative to the location of the .gitignore file, with
+// infinite depth.
+//
+// A slash followed by two consecutive asterisks then a slash matches zero or more
+// directories. For example, "a/**/b" matches "a/b", "a/x/b", "a/x/y/b" and so on.
+//
+// Other consecutive asterisks are considered invalid.
+package pathspec
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+type gitIgnorePattern struct {
+	Regex   string
+	Include bool
+}
+
+// GitIgnore matches a filepath against a string slice of gitignore patterns.
+// On match it returns true, otherwise false. On error it passes the error through.
+func GitIgnore(patterns []string, name string) (ignore bool, err error) {
+	for _, pattern := range patterns {
+		p := parsePattern(pattern)
+		// Convert Windows paths to Unix paths
+		name = filepath.ToSlash(name)
+		match, err := regexp.MatchString(p.Regex, name)
+		if err != nil {
+			return ignore, err
+		}
+		if match {
+			if p.Include {
+				return false, nil
+			}
+			ignore = true
+		}
+	}
+	return ignore, nil
+}
+
+// ReadGitIgnore reads gitignore patterns line by line from an io.Reader and
+// matches them against name. It behaves exactly like the GitIgnore function;
+// the only difference is that GitIgnore works on a string slice.
+//
+// ReadGitIgnore returns whether the path matches and any error encountered.
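+//
+// Illustrative usage (assumes an os.File f opened on a .gitignore file):
+//
+//	ignored, err := pathspec.ReadGitIgnore(f, "internal/util/parse.go")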
+func ReadGitIgnore(content io.Reader, name string) (ignore bool, err error) {
+	scanner := bufio.NewScanner(content)
+
+	for scanner.Scan() {
+		pattern := strings.TrimSpace(scanner.Text())
+		if len(pattern) == 0 || pattern[0] == '#' {
+			continue
+		}
+		p := parsePattern(pattern)
+		// Convert Windows paths to Unix paths
+		name = filepath.ToSlash(name)
+		match, err := regexp.MatchString(p.Regex, name)
+		if err != nil {
+			return ignore, err
+		}
+		if match {
+			if p.Include {
+				return false, scanner.Err()
+			}
+			ignore = true
+		}
+	}
+	return ignore, scanner.Err()
+}
+
+func parsePattern(pattern string) *gitIgnorePattern {
+	p := &gitIgnorePattern{}
+
+	// An optional prefix "!" which negates the pattern; any matching file
+	// excluded by a previous pattern will become included again.
+	if strings.HasPrefix(pattern, "!") {
+		pattern = pattern[1:]
+		p.Include = true
+	} else {
+		p.Include = false
+	}
+
+	// Remove leading back-slash escape for escaped hash ('#') or
+	// exclamation mark ('!').
+	if strings.HasPrefix(pattern, "\\") {
+		pattern = pattern[1:]
+	}
+
+	// Split pattern into segments.
+	patternSegs := strings.Split(pattern, "/")
+
+	// A pattern beginning with a slash ('/') will only match paths
+	// directly on the root directory instead of any descendant paths.
+	// So remove the empty first segment to make the pattern absolute
+	// to the root. A pattern without a beginning slash ('/') will
+	// match any descendant path. This is equivalent to "**/{pattern}".
+	// So prepend double-asterisks to make the pattern relative to
+	// the root.
+	if patternSegs[0] == "" {
+		patternSegs = patternSegs[1:]
+	} else if patternSegs[0] != "**" {
+		patternSegs = append([]string{"**"}, patternSegs...)
+	}
+
+	// A pattern ending with a slash ('/') will match all descendant
+	// paths if it is a directory, but not if it is a regular file.
+	// This is equivalent to "{pattern}/**". So, set the last segment
+	// to double asterisks to include all descendants.
+	if patternSegs[len(patternSegs)-1] == "" {
+		patternSegs[len(patternSegs)-1] = "**"
+	}
+
+	// Build regular expression from pattern.
+	var expr bytes.Buffer
+	expr.WriteString("^")
+	needSlash := false
+
+	for i, seg := range patternSegs {
+		switch seg {
+		case "**":
+			switch {
+			case i == 0 && i == len(patternSegs)-1:
+				// A pattern consisting solely of double-asterisks ('**')
+				// will match every path.
+				expr.WriteString(".+")
+			case i == 0:
+				// A normalized pattern beginning with double-asterisks
+				// ('**') will match any leading path segments.
+				expr.WriteString("(?:.+/)?")
+				needSlash = false
+			case i == len(patternSegs)-1:
+				// A normalized pattern ending with double-asterisks ('**')
+				// will match any trailing path segments.
+				expr.WriteString("/.+")
+			default:
+				// A pattern with inner double-asterisks ('**') will match
+				// multiple (or zero) inner path segments.
+				expr.WriteString("(?:/.+)?")
+				needSlash = true
+			}
+		case "*":
+			// Match single path segment.
+			if needSlash {
+				expr.WriteString("/")
+			}
+			expr.WriteString("[^/]+")
+			needSlash = true
+		default:
+			// Match segment glob pattern.
+			if needSlash {
+				expr.WriteString("/")
+			}
+			expr.WriteString(translateGlob(seg))
+			needSlash = true
+		}
+	}
+	expr.WriteString("$")
+	p.Regex = expr.String()
+	return p
+}
+
+// NOTE: This is derived from `fnmatch.translate()` and is similar to
+// the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
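+//
+// For example (illustrative): the glob "*.go" translates to the regex
+// `[^/]*\.go`, and "a?c" translates to `a[^/]c`.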
+func translateGlob(glob string) string { + var regex bytes.Buffer + escape := false + + for i := 0; i < len(glob); i++ { + char := glob[i] + // Escape the character. + switch { + case escape: + escape = false + regex.WriteString(regexp.QuoteMeta(string(char))) + case char == '\\': + // Escape character, escape next character. + escape = true + case char == '*': + // Multi-character wildcard. Match any string (except slashes), + // including an empty string. + regex.WriteString("[^/]*") + case char == '?': + // Single-character wildcard. Match any single character (except + // a slash). + regex.WriteString("[^/]") + case char == '[': + regex.WriteString(translateBracketExpression(&i, glob)) + default: + // Regular character, escape it for regex. + regex.WriteString(regexp.QuoteMeta(string(char))) + } + } + return regex.String() +} + +// Bracket expression wildcard. Except for the beginning +// exclamation mark, the whole bracket expression can be used +// directly as regex but we have to find where the expression +// ends. +// - "[][!]" matches ']', '[' and '!'. +// - "[]-]" matches ']' and '-'. +// - "[!]a-]" matches any character except ']', 'a' and '-'. +func translateBracketExpression(i *int, glob string) string { + regex := string(glob[*i]) + *i++ + j := *i + + // Pass bracket expression negation. + if j < len(glob) && glob[j] == '!' { + j++ + } + // Pass first closing bracket if it is at the beginning of the + // expression. + if j < len(glob) && glob[j] == ']' { + j++ + } + // Find closing bracket. Stop once we reach the end or find it. + for j < len(glob) && glob[j] != ']' { + j++ + } + + if j < len(glob) { + if glob[*i] == '!' { + regex = regex + "^" + *i++ + } + regex = regexp.QuoteMeta(glob[*i:j]) + *i = j + } else { + // Failed to find closing bracket, treat opening bracket as a + // bracket literal instead of as an expression. + regex = regexp.QuoteMeta(string(glob[*i])) + } + return "[" + regex + "]" +} diff --git a/vendor/github.com/spdx/tools-golang/LICENSE.code b/vendor/github.com/spdx/tools-golang/LICENSE.code new file mode 100644 index 0000000000..07efb6292a --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/LICENSE.code @@ -0,0 +1,550 @@ +The tools-golang source code is provided and may be used, at your option, +under either: +* Apache License, version 2.0 (Apache-2.0), OR +* GNU General Public License, version 2.0 or later (GPL-2.0-or-later). + +Copies of both licenses are included below. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.
Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/vendor/github.com/spdx/tools-golang/LICENSE.docs b/vendor/github.com/spdx/tools-golang/LICENSE.docs
new file mode 100644
index 0000000000..2c8e93cbda
--- /dev/null
+++ b/vendor/github.com/spdx/tools-golang/LICENSE.docs
@@ -0,0 +1,399 @@
+The tools-golang documentation is provided under the Creative Commons Attribution
+4.0 International license (CC-BY-4.0), a copy of which is provided below.
+
+Attribution 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material.
A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution 4.0 International Public License ("Public License"). To the +extent this Public License may be interpreted as a contract, You are +granted the Licensed Rights in consideration of Your acceptance of +these terms and conditions, and the Licensor grants You such rights in +consideration of benefits the Licensor receives from making the +Licensed Material available under these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. 
UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. 
No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. + diff --git a/vendor/github.com/spdx/tools-golang/json/parser.go b/vendor/github.com/spdx/tools-golang/json/parser.go new file mode 100644 index 0000000000..ee7915de0f --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/json/parser.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package spdx_json + +import ( + "bytes" + "encoding/json" + "io" + + "github.com/spdx/tools-golang/spdx/v2_2" + "github.com/spdx/tools-golang/spdx/v2_3" +) + +// Load2_2 takes in an io.Reader and returns an SPDX document. +func Load2_2(content io.Reader) (*v2_2.Document, error) { + // convert io.Reader to a slice of bytes and call the parser + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(content) + if err != nil { + return nil, err + } + + var doc v2_2.Document + err = json.Unmarshal(buf.Bytes(), &doc) + if err != nil { + return nil, err + } + + return &doc, nil +} + +// Load2_3 takes in an io.Reader and returns an SPDX document. +func Load2_3(content io.Reader) (*v2_3.Document, error) { + // convert io.Reader to a slice of bytes and call the parser + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(content) + if err != nil { + return nil, err + } + + var doc v2_3.Document + err = json.Unmarshal(buf.Bytes(), &doc) + if err != nil { + return nil, err + } + + return &doc, nil +} diff --git a/vendor/github.com/spdx/tools-golang/json/writer.go b/vendor/github.com/spdx/tools-golang/json/writer.go new file mode 100644 index 0000000000..8f2b94dc60 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/json/writer.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package spdx_json + +import ( + "encoding/json" + "github.com/spdx/tools-golang/spdx/v2_3" + "io" + + "github.com/spdx/tools-golang/spdx/v2_2" +) + +// Save2_2 takes an SPDX Document (version 2.2) and an io.Writer, and writes the document to the writer in JSON format. 
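+// A minimal round-trip sketch (illustrative only; the file name is hypothetical and error handling is elided): +// +// f, _ := os.Open("sbom.spdx.json") +// doc, _ := spdx_json.Load2_2(f) +// _ = spdx_json.Save2_2(doc, os.Stdout) +//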
+func Save2_2(doc *v2_2.Document, w io.Writer) error { + buf, err := json.Marshal(doc) + if err != nil { + return err + } + + _, err = w.Write(buf) + if err != nil { + return err + } + + return nil +} + +// Save2_3 takes an SPDX Document (version 2.3) and an io.Writer, and writes the document to the writer in JSON format. +func Save2_3(doc *v2_3.Document, w io.Writer) error { + buf, err := json.Marshal(doc) + if err != nil { + return err + } + + _, err = w.Write(buf) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/common/annotation.go new file mode 100644 index 0000000000..e77d7b780a --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/annotation.go @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +import ( + "encoding/json" + "fmt" + "strings" +) + +type Annotator struct { + Annotator string + // including AnnotatorType: one of "Person", "Organization" or "Tool" + AnnotatorType string +} + +// UnmarshalJSON takes an annotator in the typical one-line format and parses it into an Annotator struct. +// This function is also used when unmarshalling YAML +func (a *Annotator) UnmarshalJSON(data []byte) error { + // annotator will simply be a string + annotatorStr := string(data) + annotatorStr = strings.Trim(annotatorStr, "\"") + + annotatorFields := strings.SplitN(annotatorStr, ": ", 2) + + if len(annotatorFields) != 2 { + return fmt.Errorf("failed to parse Annotator '%s'", annotatorStr) + } + + a.AnnotatorType = annotatorFields[0] + a.Annotator = annotatorFields[1] + + return nil +} + +// MarshalJSON converts the receiver into a slice of bytes representing an Annotator in string form. +// This function is also used when marshalling to YAML +func (a Annotator) MarshalJSON() ([]byte, error) { + if a.Annotator != "" { + return json.Marshal(fmt.Sprintf("%s: %s", a.AnnotatorType, a.Annotator)) + } + + return []byte{}, nil +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/checksum.go b/vendor/github.com/spdx/tools-golang/spdx/common/checksum.go new file mode 100644 index 0000000000..aa2ae52ff1 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/checksum.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +// ChecksumAlgorithm represents the algorithm used to generate the file checksum in the Checksum struct. +type ChecksumAlgorithm string + +// The checksum algorithms mentioned in the SPDX v2.2.0 spec: https://spdx.github.io/spdx-spec/4-file-information/#44-file-checksum +const ( + SHA224 ChecksumAlgorithm = "SHA224" + SHA1 ChecksumAlgorithm = "SHA1" + SHA256 ChecksumAlgorithm = "SHA256" + SHA384 ChecksumAlgorithm = "SHA384" + SHA512 ChecksumAlgorithm = "SHA512" + MD2 ChecksumAlgorithm = "MD2" + MD4 ChecksumAlgorithm = "MD4" + MD5 ChecksumAlgorithm = "MD5" + MD6 ChecksumAlgorithm = "MD6" + SHA3_256 ChecksumAlgorithm = "SHA3-256" + SHA3_384 ChecksumAlgorithm = "SHA3-384" + SHA3_512 ChecksumAlgorithm = "SHA3-512" + BLAKE2b_256 ChecksumAlgorithm = "BLAKE2b-256" + BLAKE2b_384 ChecksumAlgorithm = "BLAKE2b-384" + BLAKE2b_512 ChecksumAlgorithm = "BLAKE2b-512" + BLAKE3 ChecksumAlgorithm = "BLAKE3" + ADLER32 ChecksumAlgorithm = "ADLER32" +) + +// Checksum provides a unique identifier to match analysis information on each specific file in a package.
+// The Algorithm field describes the ChecksumAlgorithm used and the Value represents the file checksum +type Checksum struct { + Algorithm ChecksumAlgorithm `json:"algorithm"` + Value string `json:"checksumValue"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/common/creation_info.go new file mode 100644 index 0000000000..c87ae7be92 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/creation_info.go @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +import ( + "encoding/json" + "fmt" + "strings" +) + +// Creator is a wrapper around the Creator SPDX field. The SPDX field contains two values, which requires special +// handling in order to marshal/unmarshal it to/from Go data types. +type Creator struct { + Creator string + // CreatorType should be one of "Person", "Organization", or "Tool" + CreatorType string +} + +// UnmarshalJSON takes a creator in the typical one-line format and parses it into a Creator struct. +// This function is also used when unmarshalling YAML +func (c *Creator) UnmarshalJSON(data []byte) error { + str := string(data) + str = strings.Trim(str, "\"") + fields := strings.SplitN(str, ": ", 2) + + if len(fields) != 2 { + return fmt.Errorf("failed to parse Creator '%s'", str) + } + + c.CreatorType = fields[0] + c.Creator = fields[1] + + return nil +} + +// MarshalJSON converts the receiver into a slice of bytes representing a Creator in string form. +// This function is also used when marshalling to YAML +func (c Creator) MarshalJSON() ([]byte, error) { + if c.Creator != "" { + return json.Marshal(fmt.Sprintf("%s: %s", c.CreatorType, c.Creator)) + } + + return []byte{}, nil +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/external.go b/vendor/github.com/spdx/tools-golang/spdx/common/external.go new file mode 100644 index 0000000000..59c3f0f03f --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/external.go @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +// Constants for various string types +const ( + // F.2 Security types + TypeSecurityCPE23Type string = "cpe23Type" + TypeSecurityCPE22Type string = "cpe22Type" + TypeSecurityAdvisory string = "advisory" + TypeSecurityFix string = "fix" + TypeSecurityUrl string = "url" + TypeSecuritySwid string = "swid" + + // F.3 Package-Manager types + TypePackageManagerMavenCentral string = "maven-central" + TypePackageManagerNpm string = "npm" + TypePackageManagerNuGet string = "nuget" + TypePackageManagerBower string = "bower" + TypePackageManagerPURL string = "purl" + + // 11.1 Relationship field types + TypeRelationshipDescribe string = "DESCRIBES" + TypeRelationshipDescribeBy string = "DESCRIBED_BY" + TypeRelationshipContains string = "CONTAINS" + TypeRelationshipContainedBy string = "CONTAINED_BY" + TypeRelationshipDependsOn string = "DEPENDS_ON" + TypeRelationshipDependencyOf string = "DEPENDENCY_OF" + TypeRelationshipBuildDependencyOf string = "BUILD_DEPENDENCY_OF" + TypeRelationshipDevDependencyOf string = "DEV_DEPENDENCY_OF" + TypeRelationshipOptionalDependencyOf string = "OPTIONAL_DEPENDENCY_OF" + TypeRelationshipProvidedDependencyOf string = "PROVIDED_DEPENDENCY_OF" + TypeRelationshipTestDependencyOf string = "TEST_DEPENDENCY_OF" + TypeRelationshipRuntimeDependencyOf string = "RUNTIME_DEPENDENCY_OF" + TypeRelationshipExampleOf string = "EXAMPLE_OF" + TypeRelationshipGenerates string = "GENERATES" +
TypeRelationshipGeneratedFrom string = "GENERATED_FROM" + TypeRelationshipAncestorOf string = "ANCESTOR_OF" + TypeRelationshipDescendantOf string = "DESCENDANT_OF" + TypeRelationshipVariantOf string = "VARIANT_OF" + TypeRelationshipDistributionArtifact string = "DISTRIBUTION_ARTIFACT" + TypeRelationshipPatchFor string = "PATCH_FOR" + TypeRelationshipPatchApplied string = "PATCH_APPLIED" + TypeRelationshipCopyOf string = "COPY_OF" + TypeRelationshipFileAdded string = "FILE_ADDED" + TypeRelationshipFileDeleted string = "FILE_DELETED" + TypeRelationshipFileModified string = "FILE_MODIFIED" + TypeRelationshipExpandedFromArchive string = "EXPANDED_FROM_ARCHIVE" + TypeRelationshipDynamicLink string = "DYNAMIC_LINK" + TypeRelationshipStaticLink string = "STATIC_LINK" + TypeRelationshipDataFileOf string = "DATA_FILE_OF" + TypeRelationshipTestCaseOf string = "TEST_CASE_OF" + TypeRelationshipBuildToolOf string = "BUILD_TOOL_OF" + TypeRelationshipDevToolOf string = "DEV_TOOL_OF" + TypeRelationshipTestOf string = "TEST_OF" + TypeRelationshipTestToolOf string = "TEST_TOOL_OF" + TypeRelationshipDocumentationOf string = "DOCUMENTATION_OF" + TypeRelationshipOptionalComponentOf string = "OPTIONAL_COMPONENT_OF" + TypeRelationshipMetafileOf string = "METAFILE_OF" + TypeRelationshipPackageOf string = "PACKAGE_OF" + TypeRelationshipAmends string = "AMENDS" + TypeRelationshipPrerequisiteFor string = "PREREQUISITE_FOR" + TypeRelationshipHasPrerequisite string = "HAS_PREREQUISITE" + TypeRelationshipRequirementDescriptionFor string = "REQUIREMENT_DESCRIPTION_FOR" + TypeRelationshipSpecificationFor string = "SPECIFICATION_FOR" + TypeRelationshipOther string = "OTHER" +) diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/identifier.go b/vendor/github.com/spdx/tools-golang/spdx/common/identifier.go new file mode 100644 index 0000000000..806a8157e2 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/identifier.go @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +import ( + "encoding/json" + "fmt" + "strings" +) + +const ( + spdxRefPrefix = "SPDXRef-" + documentRefPrefix = "DocumentRef-" +) + +// ElementID represents the identifier string portion of an SPDX element +// identifier. DocElementID should be used for any attributes which can +// contain identifiers defined in a different SPDX document. +// ElementIDs should NOT contain the mandatory 'SPDXRef-' portion. 
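+// For example (illustrative): the JSON string "SPDXRef-Package-app" unmarshals +// to ElementID("Package-app"), and marshalling that value produces +// "SPDXRef-Package-app" again.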
+type ElementID string + +// MarshalJSON returns an SPDXRef- prefixed JSON string +func (d ElementID) MarshalJSON() ([]byte, error) { + return json.Marshal(prefixElementId(d)) +} + +// UnmarshalJSON validates SPDXRef- prefixes and removes them when processing ElementIDs +func (d *ElementID) UnmarshalJSON(data []byte) error { + // SPDX identifier will simply be a string + idStr := string(data) + idStr = strings.Trim(idStr, "\"") + + e, err := trimElementIdPrefix(idStr) + if err != nil { + return err + } + *d = e + return nil +} + +// prefixElementId adds the SPDXRef- prefix to an element ID if it does not have one +func prefixElementId(id ElementID) string { + val := string(id) + if !strings.HasPrefix(val, spdxRefPrefix) { + return spdxRefPrefix + val + } + return val +} + +// trimElementIdPrefix removes the SPDXRef- prefix from an element ID string or returns an error if it +// does not start with SPDXRef- +func trimElementIdPrefix(id string) (ElementID, error) { + // handle SPDXRef- + idFields := strings.SplitN(id, spdxRefPrefix, 2) + if len(idFields) != 2 { + return "", fmt.Errorf("failed to parse SPDX identifier '%s'", id) + } + + e := ElementID(idFields[1]) + return e, nil +} + +// DocElementID represents an SPDX element identifier that could be defined +// in a different SPDX document, and therefore could have a "DocumentRef-" +// portion, such as Relationships and Annotations. +// ElementID is used for attributes in which a "DocumentRef-" portion cannot +// appear, such as a Package or File definition (since it is necessarily +// being defined in the present document). +// DocumentRefID will be the empty string for elements defined in the +// present document. +// DocElementIDs should NOT contain the mandatory 'DocumentRef-' or +// 'SPDXRef-' portions. +// SpecialID is used ONLY if the DocElementID matches a defined set of +// permitted special values for a particular field, e.g. "NONE" or +// "NOASSERTION" for the right-hand side of Relationships. If SpecialID +// is set, DocumentRefID and ElementRefID should be empty (and vice versa). +type DocElementID struct { + DocumentRefID string + ElementRefID ElementID + SpecialID string +} + +// MarshalJSON converts the receiver into a slice of bytes representing a DocElementID in string form. +// This function is also used when marshalling to YAML +func (d DocElementID) MarshalJSON() ([]byte, error) { + if d.DocumentRefID != "" && d.ElementRefID != "" { + idStr := prefixElementId(d.ElementRefID) + return json.Marshal(fmt.Sprintf("%s%s:%s", documentRefPrefix, d.DocumentRefID, idStr)) + } else if d.ElementRefID != "" { + return json.Marshal(prefixElementId(d.ElementRefID)) + } else if d.SpecialID != "" { + return json.Marshal(d.SpecialID) + } + + return []byte{}, fmt.Errorf("failed to marshal empty DocElementID") +} + +// UnmarshalJSON takes an SPDX Identifier string and parses it into a DocElementID struct.
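+// For example (illustrative), "DocumentRef-ext-doc:SPDXRef-File-a" parses to +// DocElementID{DocumentRefID: "ext-doc", ElementRefID: "File-a"}, while the +// special values "NONE" and "NOASSERTION" set only SpecialID.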
+// This function is also used when unmarshalling YAML +func (d *DocElementID) UnmarshalJSON(data []byte) (err error) { + // SPDX identifier will simply be a string + idStr := string(data) + idStr = strings.Trim(idStr, "\"") + + // handle special cases + if idStr == "NONE" || idStr == "NOASSERTION" { + d.SpecialID = idStr + return nil + } + + var idFields []string + // handle DocumentRef- if present + if strings.HasPrefix(idStr, documentRefPrefix) { + // strip out the "DocumentRef-" so we can get the value + idFields = strings.SplitN(idStr, documentRefPrefix, 2) + idStr = idFields[1] + + // an SPDXRef can appear after a DocumentRef, separated by a colon + idFields = strings.SplitN(idStr, ":", 2) + d.DocumentRefID = idFields[0] + + if len(idFields) == 2 { + idStr = idFields[1] + } else { + return nil + } + } + + d.ElementRefID, err = trimElementIdPrefix(idStr) + return err +} + +// TODO: add equivalents for LicenseRef- identifiers + +// MakeDocElementID takes strings (without prefixes) for the DocumentRef- +// and SPDXRef- identifiers, and returns a DocElementID. An empty string +// should be used for the DocumentRef- portion if it is referring to the +// present document. +func MakeDocElementID(docRef string, eltRef string) DocElementID { + return DocElementID{ + DocumentRefID: docRef, + ElementRefID: ElementID(eltRef), + } +} + +// MakeDocElementSpecial takes a "special" string (e.g. "NONE" or +// "NOASSERTION" for the right side of a Relationship), and returns +// a DocElementID with it in the SpecialID field. Other fields will +// be empty. +func MakeDocElementSpecial(specialID string) DocElementID { + return DocElementID{SpecialID: specialID} +} + +// RenderElementID takes an ElementID and returns the string equivalent, +// with the SPDXRef- prefix reinserted. +func RenderElementID(eID ElementID) string { + return spdxRefPrefix + string(eID) +} + +// RenderDocElementID takes a DocElementID and returns the string equivalent, +// with the SPDXRef- prefix (and, if applicable, the DocumentRef- prefix) +// reinserted. If a SpecialID is present, it will be rendered verbatim and +// DocumentRefID and ElementRefID will be ignored. +func RenderDocElementID(deID DocElementID) string { + if deID.SpecialID != "" { + return deID.SpecialID + } + prefix := "" + if deID.DocumentRefID != "" { + prefix = documentRefPrefix + deID.DocumentRefID + ":" + } + return prefix + spdxRefPrefix + string(deID.ElementRefID) +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/package.go b/vendor/github.com/spdx/tools-golang/spdx/common/package.go new file mode 100644 index 0000000000..de5a07523f --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/package.go @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +import ( + "encoding/json" + "fmt" + "strings" +) + +type Supplier struct { + // can be "NOASSERTION" + Supplier string + // SupplierType can be one of "Person", "Organization", or empty if Supplier is "NOASSERTION" + SupplierType string +} + +// UnmarshalJSON takes a supplier in the typical one-line format and parses it into a Supplier struct.
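+// For example (illustrative), "Organization: Example Corp." parses to +// Supplier{SupplierType: "Organization", Supplier: "Example Corp."}, while the +// bare string "NOASSERTION" sets only the Supplier field.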
+// This function is also used when unmarshalling YAML +func (s *Supplier) UnmarshalJSON(data []byte) error { + // the value is just a string presented as a slice of bytes + supplierStr := string(data) + supplierStr = strings.Trim(supplierStr, "\"") + + if supplierStr == "NOASSERTION" { + s.Supplier = supplierStr + return nil + } + + supplierFields := strings.SplitN(supplierStr, ": ", 2) + + if len(supplierFields) != 2 { + return fmt.Errorf("failed to parse Supplier '%s'", supplierStr) + } + + s.SupplierType = supplierFields[0] + s.Supplier = supplierFields[1] + + return nil +} + +// MarshalJSON converts the receiver into a slice of bytes representing a Supplier in string form. +// This function is also used when marshalling to YAML +func (s Supplier) MarshalJSON() ([]byte, error) { + if s.Supplier == "NOASSERTION" { + return json.Marshal(s.Supplier) + } else if s.SupplierType != "" && s.Supplier != "" { + return json.Marshal(fmt.Sprintf("%s: %s", s.SupplierType, s.Supplier)) + } + + return []byte{}, fmt.Errorf("failed to marshal invalid Supplier: %+v", s) +} + +type Originator struct { + // can be "NOASSERTION" + Originator string + // OriginatorType can be one of "Person", "Organization", or empty if Originator is "NOASSERTION" + OriginatorType string +} + +// UnmarshalJSON takes an originator in the typical one-line format and parses it into an Originator struct. +// This function is also used when unmarshalling YAML +func (o *Originator) UnmarshalJSON(data []byte) error { + // the value is just a string presented as a slice of bytes + originatorStr := string(data) + originatorStr = strings.Trim(originatorStr, "\"") + + if originatorStr == "NOASSERTION" { + o.Originator = originatorStr + return nil + } + + originatorFields := strings.SplitN(originatorStr, ": ", 2) + + if len(originatorFields) != 2 { + return fmt.Errorf("failed to parse Originator '%s'", originatorStr) + } + + o.OriginatorType = originatorFields[0] + o.Originator = originatorFields[1] + + return nil +} + +// MarshalJSON converts the receiver into a slice of bytes representing an Originator in string form. +// This function is also used when marshalling to YAML +func (o Originator) MarshalJSON() ([]byte, error) { + if o.Originator == "NOASSERTION" { + return json.Marshal(o.Originator) + } else if o.Originator != "" { + return json.Marshal(fmt.Sprintf("%s: %s", o.OriginatorType, o.Originator)) + } + + return []byte{}, nil +} + +type PackageVerificationCode struct { + // Cardinality: mandatory, one if filesAnalyzed is true / omitted; + // zero (must be omitted) if filesAnalyzed is false + Value string `json:"packageVerificationCodeValue"` + // Spec also allows specifying files to exclude from the + // verification code algorithm; intended to enable exclusion of + // the SPDX document file itself. 
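+ // For example (illustrative): ExcludedFiles: []string{"./package.spdx"}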
+ ExcludedFiles []string `json:"packageVerificationCodeExcludedFiles,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/common/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/common/snippet.go new file mode 100644 index 0000000000..63afac3ba2 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/common/snippet.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package common + +type SnippetRangePointer struct { + // 5.3: Snippet Byte Range: [start byte]:[end byte] + // Cardinality: mandatory, one + Offset int `json:"offset,omitempty"` + + // 5.4: Snippet Line Range: [start line]:[end line] + // Cardinality: optional, one + LineNumber int `json:"lineNumber,omitempty"` + + FileSPDXIdentifier ElementID `json:"reference"` +} + +type SnippetRange struct { + StartPointer SnippetRangePointer `json:"startPointer"` + EndPointer SnippetRangePointer `json:"endPointer"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go new file mode 100644 index 0000000000..35eddc617e --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/annotation.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// Annotation is an Annotation section of an SPDX Document for version 2.2 of the spec. +type Annotation struct { + // 12.1: Annotator + // Cardinality: conditional (mandatory, one) if there is an Annotation + Annotator common.Annotator `json:"annotator"` + + // 12.2: Annotation Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationDate string `json:"annotationDate"` + + // 12.3: Annotation Type: "REVIEW" or "OTHER" + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationType string `json:"annotationType"` + + // 12.4: SPDX Identifier Reference + // Cardinality: conditional (mandatory, one) if there is an Annotation + // This field is not used in hierarchical data formats where the referenced element is clear, such as JSON or YAML. + AnnotationSPDXIdentifier common.DocElementID `json:"-"` + + // 12.5: Annotation Comment + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationComment string `json:"comment"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go new file mode 100644 index 0000000000..70e611f79b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/creation_info.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// CreationInfo is a Document Creation Information section of an +// SPDX Document for version 2.2 of the spec. 
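+// For example (illustrative), Creators might contain +// common.Creator{CreatorType: "Tool", Creator: "example-sbom-generator-1.0"}, +// which common.Creator marshals to the one-line form +// "Tool: example-sbom-generator-1.0".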
+type CreationInfo struct { + // 6.7: License List Version + // Cardinality: optional, one + LicenseListVersion string `json:"licenseListVersion"` + + // 6.8: Creators: may have multiple keys for Person, Organization + // and/or Tool + // Cardinality: mandatory, one or many + Creators []common.Creator `json:"creators"` + + // 6.9: Created: data format YYYY-MM-DDThh:mm:ssZ + // Cardinality: mandatory, one + Created string `json:"created"` + + // 6.10: Creator Comment + // Cardinality: optional, one + CreatorComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go new file mode 100644 index 0000000000..31ac08b6c7 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/document.go @@ -0,0 +1,65 @@ +// Package spdx contains the struct definition for an SPDX Document +// and its constituent parts. +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// ExternalDocumentRef is a reference to an external SPDX document +// as defined in section 6.6 for version 2.2 of the spec. +type ExternalDocumentRef struct { + // DocumentRefID is the ID string defined in the start of the + // reference. It should _not_ contain the "DocumentRef-" part + // of the mandatory ID string. + DocumentRefID string `json:"externalDocumentId"` + + // URI is the URI defined for the external document + URI string `json:"spdxDocument"` + + // Checksum is the actual hash data + Checksum common.Checksum `json:"checksum"` +} + +// Document is an SPDX Document for version 2.2 of the spec. +// See https://spdx.github.io/spdx-spec/v2-draft/ (DRAFT) +type Document struct { + // 6.1: SPDX Version; should be in the format "SPDX-2.2" + // Cardinality: mandatory, one + SPDXVersion string `json:"spdxVersion"` + + // 6.2: Data License; should be "CC0-1.0" + // Cardinality: mandatory, one + DataLicense string `json:"dataLicense"` + + // 6.3: SPDX Identifier; should be "DOCUMENT" to represent + // mandatory identifier of SPDXRef-DOCUMENT + // Cardinality: mandatory, one + SPDXIdentifier common.ElementID `json:"SPDXID"` + + // 6.4: Document Name + // Cardinality: mandatory, one + DocumentName string `json:"name"` + + // 6.5: Document Namespace + // Cardinality: mandatory, one + DocumentNamespace string `json:"documentNamespace"` + + // 6.6: External Document References + // Cardinality: optional, one or many + ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` + + // 6.11: Document Comment + // Cardinality: optional, one + DocumentComment string `json:"comment,omitempty"` + + CreationInfo *CreationInfo `json:"creationInfo"` + Packages []*Package `json:"packages,omitempty"` + Files []*File `json:"files,omitempty"` + OtherLicenses []*OtherLicense `json:"hasExtractedLicensingInfos,omitempty"` + Relationships []*Relationship `json:"relationships,omitempty"` + Annotations []*Annotation `json:"annotations,omitempty"` + Snippets []Snippet `json:"snippets,omitempty"` + + // DEPRECATED in version 2.0 of spec + Reviews []*Review `json:"-"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go new file mode 100644 index 0000000000..150e79f0bb --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/file.go @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import 
"github.com/spdx/tools-golang/spdx/common" + +// File is a File section of an SPDX Document for version 2.2 of the spec. +type File struct { + // 8.1: File Name + // Cardinality: mandatory, one + FileName string `json:"fileName"` + + // 8.2: File SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + FileSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 8.3: File Types + // Cardinality: optional, multiple + FileTypes []string `json:"fileTypes,omitempty"` + + // 8.4: File Checksum: may have keys for SHA1, SHA256 and/or MD5 + // Cardinality: mandatory, one SHA1, others may be optionally provided + Checksums []common.Checksum `json:"checksums"` + + // 8.5: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + LicenseConcluded string `json:"licenseConcluded"` + + // 8.6: License Information in File: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one or many + LicenseInfoInFiles []string `json:"licenseInfoInFiles"` + + // 8.7: Comments on License + // Cardinality: optional, one + LicenseComments string `json:"licenseComments,omitempty"` + + // 8.8: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + FileCopyrightText string `json:"copyrightText"` + + // DEPRECATED in version 2.1 of spec + // 8.9-8.11: Artifact of Project variables (defined below) + // Cardinality: optional, one or many + ArtifactOfProjects []*ArtifactOfProject `json:"-"` + + // 8.12: File Comment + // Cardinality: optional, one + FileComment string `json:"comment,omitempty"` + + // 8.13: File Notice + // Cardinality: optional, one + FileNotice string `json:"noticeText,omitempty"` + + // 8.14: File Contributor + // Cardinality: optional, one or many + FileContributors []string `json:"fileContributors,omitempty"` + + // 8.15: File Attribution Text + // Cardinality: optional, one or many + FileAttributionTexts []string `json:"attributionTexts,omitempty"` + + // DEPRECATED in version 2.0 of spec + // 8.16: File Dependencies + // Cardinality: optional, one or many + FileDependencies []string `json:"-"` + + // Snippets contained in this File + // Note that Snippets could be defined in a different Document! However, + // the only ones that _THIS_ document can contain are this ones that are + // defined here -- so this should just be an ElementID. + Snippets map[common.ElementID]*Snippet `json:"-"` + + Annotations []Annotation `json:"annotations,omitempty"` +} + +// ArtifactOfProject is a DEPRECATED collection of data regarding +// a Package, as defined in sections 8.9-8.11 in version 2.2 of the spec. 
+type ArtifactOfProject struct { + + // DEPRECATED in version 2.1 of spec + // 8.9: Artifact of Project Name + // Cardinality: conditional, required if present, one per AOP + Name string + + // DEPRECATED in version 2.1 of spec + // 8.10: Artifact of Project Homepage: URL or "UNKNOWN" + // Cardinality: optional, one per AOP + HomePage string + + // DEPRECATED in version 2.1 of spec + // 8.11: Artifact of Project Uniform Resource Identifier + // Cardinality: optional, one per AOP + URI string +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/other_license.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/other_license.go new file mode 100644 index 0000000000..1eaf048ddb --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/other_license.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +// OtherLicense is an Other License Information section of an +// SPDX Document for version 2.2 of the spec. +type OtherLicense struct { + // 10.1: License Identifier: "LicenseRef-[idstring]" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseIdentifier string `json:"licenseId"` + + // 10.2: Extracted Text + // Cardinality: conditional (mandatory, one) if there is a + // License Identifier assigned + ExtractedText string `json:"extractedText"` + + // 10.3: License Name: single line of text or "NOASSERTION" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseName string `json:"name,omitempty"` + + // 10.4: License Cross Reference + // Cardinality: conditional (optional, one or many) if license + // is not on SPDX License List + LicenseCrossReferences []string `json:"seeAlsos,omitempty"` + + // 10.5: License Comment + // Cardinality: optional, one + LicenseComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go new file mode 100644 index 0000000000..2d99e0456b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/package.go @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// Package is a Package section of an SPDX Document for version 2.2 of the spec. +type Package struct { + // NOT PART OF SPEC + // flag: does this "package" contain files that were in fact "unpackaged", + // e.g. included directly in the Document without being in a Package? 
+ IsUnpackaged bool `json:"-"` + + // 7.1: Package Name + // Cardinality: mandatory, one + PackageName string `json:"name"` + + // 7.2: Package SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + PackageSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 7.3: Package Version + // Cardinality: optional, one + PackageVersion string `json:"versionInfo,omitempty"` + + // 7.4: Package File Name + // Cardinality: optional, one + PackageFileName string `json:"packageFileName,omitempty"` + + // 7.5: Package Supplier: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageSupplier *common.Supplier `json:"supplier,omitempty"` + + // 7.6: Package Originator: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageOriginator *common.Originator `json:"originator,omitempty"` + + // 7.7: Package Download Location + // Cardinality: mandatory, one + PackageDownloadLocation string `json:"downloadLocation"` + + // 7.8: FilesAnalyzed + // Cardinality: optional, one; default value is "true" if omitted + FilesAnalyzed bool `json:"filesAnalyzed,omitempty"` + // NOT PART OF SPEC: did FilesAnalyzed tag appear? + IsFilesAnalyzedTagPresent bool `json:"-"` + + // 7.9: Package Verification Code + PackageVerificationCode common.PackageVerificationCode `json:"packageVerificationCode"` + + // 7.10: Package Checksum: may have keys for SHA1, SHA256, SHA512 and/or MD5 + // Cardinality: optional, one or many + PackageChecksums []common.Checksum `json:"checksums,omitempty"` + + // 7.11: Package Home Page + // Cardinality: optional, one + PackageHomePage string `json:"homepage,omitempty"` + + // 7.12: Source Information + // Cardinality: optional, one + PackageSourceInfo string `json:"sourceInfo,omitempty"` + + // 7.13: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageLicenseConcluded string `json:"licenseConcluded"` + + // 7.14: All Licenses Info from Files: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one or many if filesAnalyzed is true / omitted; + // zero (must be omitted) if filesAnalyzed is false + PackageLicenseInfoFromFiles []string `json:"licenseInfoFromFiles"` + + // 7.15: Declared License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageLicenseDeclared string `json:"licenseDeclared"` + + // 7.16: Comments on License + // Cardinality: optional, one + PackageLicenseComments string `json:"licenseComments,omitempty"` + + // 7.17: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageCopyrightText string `json:"copyrightText"` + + // 7.18: Package Summary Description + // Cardinality: optional, one + PackageSummary string `json:"summary,omitempty"` + + // 7.19: Package Detailed Description + // Cardinality: optional, one + PackageDescription string `json:"description,omitempty"` + + // 7.20: Package Comment + // Cardinality: optional, one + PackageComment string `json:"comment,omitempty"` + + // 7.21: Package External Reference + // Cardinality: optional, one or many + PackageExternalReferences []*PackageExternalReference `json:"externalRefs,omitempty"` + + // 7.22: Package External Reference Comment + // Cardinality: conditional (optional, one) for each External Reference + // contained within PackageExternalReference struct, if present + + // 7.23: Package Attribution Text + //
Cardinality: optional, one or many + PackageAttributionTexts []string `json:"attributionTexts,omitempty"` + + // Files contained in this Package + Files []*File `json:"files,omitempty"` + + Annotations []Annotation `json:"annotations,omitempty"` +} + +// PackageExternalReference is an External Reference to additional info +// about a Package, as defined in section 7.21 in version 2.2 of the spec. +type PackageExternalReference struct { + // category is "SECURITY", "PACKAGE-MANAGER" or "OTHER" + Category string `json:"referenceCategory"` + + // type is an [idstring] as defined in Appendix VI; + // called RefType here due to "type" being a Golang keyword + RefType string `json:"referenceType"` + + // locator is a unique string to access the package-specific + // info, metadata or content within the target location + Locator string `json:"referenceLocator"` + + // 7.22: Package External Reference Comment + // Cardinality: conditional (optional, one) for each External Reference + ExternalRefComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go new file mode 100644 index 0000000000..a93baa714d --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/relationship.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// Relationship is a Relationship section of an SPDX Document for +// version 2.2 of the spec. +type Relationship struct { + + // 11.1: Relationship + // Cardinality: optional, one or more; one per Relationship + // one mandatory for SPDX Document with multiple packages + // RefA and RefB are first and second item + // Relationship is type from 11.1.1 + RefA common.DocElementID `json:"spdxElementId"` + RefB common.DocElementID `json:"relatedSpdxElement"` + Relationship string `json:"relationshipType"` + + // 11.2: Relationship Comment + // Cardinality: optional, one + RelationshipComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/review.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/review.go new file mode 100644 index 0000000000..22b3b8a081 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/review.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +// Review is a Review section of an SPDX Document for version 2.2 of the spec. +// DEPRECATED in version 2.0 of spec; retained here for compatibility. 
+type Review struct { + + // DEPRECATED in version 2.0 of spec + // 13.1: Reviewer + // Cardinality: optional, one + Reviewer string + // including AnnotatorType: one of "Person", "Organization" or "Tool" + ReviewerType string + + // DEPRECATED in version 2.0 of spec + // 13.2: Review Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is a Reviewer + ReviewDate string + + // DEPRECATED in version 2.0 of spec + // 13.3: Review Comment + // Cardinality: optional, one + ReviewComment string +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go new file mode 100644 index 0000000000..61045f1e02 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_2/snippet.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_2 + +import "github.com/spdx/tools-golang/spdx/common" + +// Snippet is a Snippet section of an SPDX Document for version 2.2 of the spec. +type Snippet struct { + + // 9.1: Snippet SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + SnippetSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 9.2: Snippet from File SPDX Identifier + // Cardinality: mandatory, one + SnippetFromFileSPDXIdentifier common.ElementID `json:"snippetFromFile"` + + // Ranges denotes the start/end byte offsets or line numbers that the snippet is relevant to + Ranges []common.SnippetRange `json:"ranges"` + + // 9.5: Snippet Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + SnippetLicenseConcluded string `json:"licenseConcluded"` + + // 9.6: License Information in Snippet: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one or many + LicenseInfoInSnippet []string `json:"licenseInfoInSnippets,omitempty"` + + // 9.7: Snippet Comments on License + // Cardinality: optional, one + SnippetLicenseComments string `json:"licenseComments,omitempty"` + + // 9.8: Snippet Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + SnippetCopyrightText string `json:"copyrightText"` + + // 9.9: Snippet Comment + // Cardinality: optional, one + SnippetComment string `json:"comment,omitempty"` + + // 9.10: Snippet Name + // Cardinality: optional, one + SnippetName string `json:"name,omitempty"` + + // 9.11: Snippet Attribution Text + // Cardinality: optional, one or many + SnippetAttributionTexts []string `json:"-"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go new file mode 100644 index 0000000000..121e995235 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/annotation.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// Annotation is an Annotation section of an SPDX Document for version 2.3 of the spec. 
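+// The Annotator field uses the one-line SPDX form, e.g. (illustrative) +// "Person: Jane Doe (jane.doe@example.com)", which common.Annotator splits on +// ": " into its AnnotatorType and Annotator parts.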
+type Annotation struct { + // 12.1: Annotator + // Cardinality: conditional (mandatory, one) if there is an Annotation + Annotator common.Annotator `json:"annotator"` + + // 12.2: Annotation Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationDate string `json:"annotationDate"` + + // 12.3: Annotation Type: "REVIEW" or "OTHER" + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationType string `json:"annotationType"` + + // 12.4: SPDX Identifier Reference + // Cardinality: conditional (mandatory, one) if there is an Annotation + // This field is not used in hierarchical data formats where the referenced element is clear, such as JSON or YAML. + AnnotationSPDXIdentifier common.DocElementID `json:"-" yaml:"-"` + + // 12.5: Annotation Comment + // Cardinality: conditional (mandatory, one) if there is an Annotation + AnnotationComment string `json:"comment"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go new file mode 100644 index 0000000000..33b2caf070 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/creation_info.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// CreationInfo is a Document Creation Information section of an +// SPDX Document for version 2.3 of the spec. +type CreationInfo struct { + // 6.7: License List Version + // Cardinality: optional, one + LicenseListVersion string `json:"licenseListVersion"` + + // 6.8: Creators: may have multiple keys for Person, Organization + // and/or Tool + // Cardinality: mandatory, one or many + Creators []common.Creator `json:"creators"` + + // 6.9: Created: data format YYYY-MM-DDThh:mm:ssZ + // Cardinality: mandatory, one + Created string `json:"created"` + + // 6.10: Creator Comment + // Cardinality: optional, one + CreatorComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go new file mode 100644 index 0000000000..32fdb8db84 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/document.go @@ -0,0 +1,65 @@ +// Package spdx contains the struct definition for an SPDX Document +// and its constituent parts. +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// ExternalDocumentRef is a reference to an external SPDX document +// as defined in section 6.6 for version 2.3 of the spec. +type ExternalDocumentRef struct { + // DocumentRefID is the ID string defined in the start of the + // reference. It should _not_ contain the "DocumentRef-" part + // of the mandatory ID string. + DocumentRefID string `json:"externalDocumentId"` + + // URI is the URI defined for the external document + URI string `json:"spdxDocument"` + + // Checksum is the actual hash data + Checksum common.Checksum `json:"checksum"` +} + +// Document is an SPDX Document for version 2.3 of the spec. 
+// See https://spdx.github.io/spdx-spec/v2.3/document-creation-information +type Document struct { + // 6.1: SPDX Version; should be in the format "SPDX-2.3" + // Cardinality: mandatory, one + SPDXVersion string `json:"spdxVersion"` + + // 6.2: Data License; should be "CC0-1.0" + // Cardinality: mandatory, one + DataLicense string `json:"dataLicense"` + + // 6.3: SPDX Identifier; should be "DOCUMENT" to represent + // mandatory identifier of SPDXRef-DOCUMENT + // Cardinality: mandatory, one + SPDXIdentifier common.ElementID `json:"SPDXID"` + + // 6.4: Document Name + // Cardinality: mandatory, one + DocumentName string `json:"name"` + + // 6.5: Document Namespace + // Cardinality: mandatory, one + DocumentNamespace string `json:"documentNamespace"` + + // 6.6: External Document References + // Cardinality: optional, one or many + ExternalDocumentReferences []ExternalDocumentRef `json:"externalDocumentRefs,omitempty"` + + // 6.11: Document Comment + // Cardinality: optional, one + DocumentComment string `json:"comment,omitempty"` + + CreationInfo *CreationInfo `json:"creationInfo"` + Packages []*Package `json:"packages,omitempty"` + Files []*File `json:"files,omitempty"` + OtherLicenses []*OtherLicense `json:"hasExtractedLicensingInfos,omitempty"` + Relationships []*Relationship `json:"relationships,omitempty"` + Annotations []*Annotation `json:"annotations,omitempty"` + Snippets []Snippet `json:"snippets,omitempty"` + + // DEPRECATED in version 2.0 of spec + Reviews []*Review `json:"-" yaml:"-"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go new file mode 100644 index 0000000000..c472fdb2fc --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/file.go @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// File is a File section of an SPDX Document for version 2.3 of the spec. 
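+// Per the cardinality notes below, a minimal File carries FileName, +// FileSPDXIdentifier, a SHA1 entry in Checksums, and FileCopyrightText; the +// concluded/in-file license fields are optional here, where v2.2 made them +// mandatory.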
+type File struct { + // 8.1: File Name + // Cardinality: mandatory, one + FileName string `json:"fileName"` + + // 8.2: File SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + FileSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 8.3: File Types + // Cardinality: optional, multiple + FileTypes []string `json:"fileTypes,omitempty"` + + // 8.4: File Checksum: may have keys for SHA1, SHA256, MD5, SHA3-256, SHA3-384, SHA3-512, BLAKE2b-256, BLAKE2b-384, BLAKE2b-512, BLAKE3, ADLER32 + // Cardinality: mandatory, one SHA1, others may be optionally provided + Checksums []common.Checksum `json:"checksums"` + + // 8.5: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one + LicenseConcluded string `json:"licenseConcluded,omitempty"` + + // 8.6: License Information in File: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one or many + LicenseInfoInFiles []string `json:"licenseInfoInFiles,omitempty"` + + // 8.7: Comments on License + // Cardinality: optional, one + LicenseComments string `json:"licenseComments,omitempty"` + + // 8.8: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + FileCopyrightText string `json:"copyrightText"` + + // DEPRECATED in version 2.1 of spec + // 8.9-8.11: Artifact of Project variables (defined below) + // Cardinality: optional, one or many + ArtifactOfProjects []*ArtifactOfProject `json:"artifactOfs,omitempty"` + + // 8.12: File Comment + // Cardinality: optional, one + FileComment string `json:"comment,omitempty"` + + // 8.13: File Notice + // Cardinality: optional, one + FileNotice string `json:"noticeText,omitempty"` + + // 8.14: File Contributor + // Cardinality: optional, one or many + FileContributors []string `json:"fileContributors,omitempty"` + + // 8.15: File Attribution Text + // Cardinality: optional, one or many + FileAttributionTexts []string `json:"attributionTexts,omitempty"` + + // DEPRECATED in version 2.0 of spec + // 8.16: File Dependencies + // Cardinality: optional, one or many + FileDependencies []string `json:"fileDependencies,omitempty"` + + // Snippets contained in this File + // Note that Snippets could be defined in a different Document! However, + // the only ones that _THIS_ document can contain are the ones that are + // defined here -- so this should just be an ElementID. + Snippets map[common.ElementID]*Snippet `json:"-" yaml:"-"` + + Annotations []Annotation `json:"annotations,omitempty"` +} + +// ArtifactOfProject is a DEPRECATED collection of data regarding +// a Package, as defined in sections 8.9-8.11 in version 2.3 of the spec.
+// NOTE: the JSON schema does not define the structure of this object: +// https://github.com/spdx/spdx-spec/blob/development/v2.3.1/schemas/spdx-schema.json#L480 +type ArtifactOfProject struct { + + // DEPRECATED in version 2.1 of spec + // 8.9: Artifact of Project Name + // Cardinality: conditional, required if present, one per AOP + Name string `json:"name"` + + // DEPRECATED in version 2.1 of spec + // 8.10: Artifact of Project Homepage: URL or "UNKNOWN" + // Cardinality: optional, one per AOP + HomePage string `json:"homePage"` + + // DEPRECATED in version 2.1 of spec + // 8.11: Artifact of Project Uniform Resource Identifier + // Cardinality: optional, one per AOP + URI string `json:"URI"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go new file mode 100644 index 0000000000..363bb41253 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/other_license.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +// OtherLicense is an Other License Information section of an +// SPDX Document for version 2.3 of the spec. +type OtherLicense struct { + // 10.1: License Identifier: "LicenseRef-[idstring]" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseIdentifier string `json:"licenseId"` + + // 10.2: Extracted Text + // Cardinality: conditional (mandatory, one) if there is a + // License Identifier assigned + ExtractedText string `json:"extractedText"` + + // 10.3: License Name: single line of text or "NOASSERTION" + // Cardinality: conditional (mandatory, one) if license is not + // on SPDX License List + LicenseName string `json:"name,omitempty"` + + // 10.4: License Cross Reference + // Cardinality: conditional (optional, one or many) if license + // is not on SPDX License List + LicenseCrossReferences []string `json:"seeAlsos,omitempty"` + + // 10.5: License Comment + // Cardinality: optional, one + LicenseComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go new file mode 100644 index 0000000000..b9d5b9515b --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/package.go @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// Package is a Package section of an SPDX Document for version 2.3 of the spec. +type Package struct { + // NOT PART OF SPEC + // flag: does this "package" contain files that were in fact "unpackaged", + // e.g. included directly in the Document without being in a Package? 
+ IsUnpackaged bool `json:"-" yaml:"-"` + + // 7.1: Package Name + // Cardinality: mandatory, one + PackageName string `json:"name"` + + // 7.2: Package SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + PackageSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 7.3: Package Version + // Cardinality: optional, one + PackageVersion string `json:"versionInfo,omitempty"` + + // 7.4: Package File Name + // Cardinality: optional, one + PackageFileName string `json:"packageFileName,omitempty"` + + // 7.5: Package Supplier: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageSupplier *common.Supplier `json:"supplier,omitempty"` + + // 7.6: Package Originator: may have single result for either Person or Organization, + // or NOASSERTION + // Cardinality: optional, one + PackageOriginator *common.Originator `json:"originator,omitempty"` + + // 7.7: Package Download Location + // Cardinality: mandatory, one + PackageDownloadLocation string `json:"downloadLocation"` + + // 7.8: FilesAnalyzed + // Cardinality: optional, one; default value is "true" if omitted + FilesAnalyzed bool `json:"filesAnalyzed,omitempty"` + // NOT PART OF SPEC: did FilesAnalyzed tag appear? + IsFilesAnalyzedTagPresent bool `json:"-" yaml:"-"` + + // 7.9: Package Verification Code + // Cardinality: if FilesAnalyzed == true must be present, if FilesAnalyzed == false must be omitted + PackageVerificationCode *common.PackageVerificationCode `json:"packageVerificationCode,omitempty"` + + // 7.10: Package Checksum: may have keys for SHA1, SHA256, SHA512, MD5, SHA3-256, SHA3-384, SHA3-512, BLAKE2b-256, BLAKE2b-384, BLAKE2b-512, BLAKE3, ADLER32 + // Cardinality: optional, one or many + PackageChecksums []common.Checksum `json:"checksums,omitempty"` + + // 7.11: Package Home Page + // Cardinality: optional, one + PackageHomePage string `json:"homepage,omitempty"` + + // 7.12: Source Information + // Cardinality: optional, one + PackageSourceInfo string `json:"sourceInfo,omitempty"` + + // 7.13: Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one + PackageLicenseConcluded string `json:"licenseConcluded,omitempty"` + + // 7.14: All Licenses Info from Files: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one or many if filesAnalyzed is true / omitted; + // zero (must be omitted) if filesAnalyzed is false + PackageLicenseInfoFromFiles []string `json:"licenseInfoFromFiles,omitempty"` + + // 7.15: Declared License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one + PackageLicenseDeclared string `json:"licenseDeclared,omitempty"` + + // 7.16: Comments on License + // Cardinality: optional, one + PackageLicenseComments string `json:"licenseComments,omitempty"` + + // 7.17: Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + PackageCopyrightText string `json:"copyrightText"` + + // 7.18: Package Summary Description + // Cardinality: optional, one + PackageSummary string `json:"summary,omitempty"` + + // 7.19: Package Detailed Description + // Cardinality: optional, one + PackageDescription string `json:"description,omitempty"` + + // 7.20: Package Comment + // Cardinality: optional, one + PackageComment string `json:"comment,omitempty"` + + // 7.21: Package External Reference + // Cardinality: optional, one or many + PackageExternalReferences []*PackageExternalReference 
`json:"externalRefs,omitempty"` + + // 7.22: Package External Reference Comment + // Cardinality: conditional (optional, one) for each External Reference + // contained within PackageExternalReference2_1 struct, if present + + // 7.23: Package Attribution Text + // Cardinality: optional, one or many + PackageAttributionTexts []string `json:"attributionTexts,omitempty"` + + // 7.24: Primary Package Purpose + // Cardinality: optional, one or many + // Allowed values: APPLICATION, FRAMEWORK, LIBRARY, CONTAINER, OPERATING-SYSTEM, DEVICE, FIRMWARE, SOURCE, ARCHIVE, FILE, INSTALL, OTHER + PrimaryPackagePurpose string `json:"primaryPackagePurpose,omitempty"` + + // 7.25: Release Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: optional, one + ReleaseDate string `json:"releaseDate,omitempty"` + + // 7.26: Build Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: optional, one + BuiltDate string `json:"builtDate,omitempty"` + + // 7.27: Valid Until Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: optional, one + ValidUntilDate string `json:"validUntilDate,omitempty"` + + // Files contained in this Package + Files []*File `json:"files,omitempty"` + + Annotations []Annotation `json:"annotations,omitempty"` +} + +// PackageExternalReference is an External Reference to additional info +// about a Package, as defined in section 7.21 in version 2.3 of the spec. +type PackageExternalReference struct { + // category is "SECURITY", "PACKAGE-MANAGER" or "OTHER" + Category string `json:"referenceCategory"` + + // type is an [idstring] as defined in Appendix VI; + // called RefType here due to "type" being a Golang keyword + RefType string `json:"referenceType"` + + // locator is a unique string to access the package-specific + // info, metadata or content within the target location + Locator string `json:"referenceLocator"` + + // 7.22: Package External Reference Comment + // Cardinality: conditional (optional, one) for each External Reference + ExternalRefComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go new file mode 100644 index 0000000000..af4c07d164 --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/relationship.go @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// Relationship is a Relationship section of an SPDX Document for +// version 2.3 of the spec. +type Relationship struct { + + // 11.1: Relationship + // Cardinality: optional, one or more; one per Relationship + // one mandatory for SPDX Document with multiple packages + // RefA and RefB are first and second item + // Relationship is type from 11.1.1 + RefA common.DocElementID `json:"spdxElementId"` + RefB common.DocElementID `json:"relatedSpdxElement"` + Relationship string `json:"relationshipType"` + + // 11.2: Relationship Comment + // Cardinality: optional, one + RelationshipComment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go new file mode 100644 index 0000000000..0463807fbd --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/review.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +// Review is a Review section of an SPDX Document for version 2.3 of the spec. +// DEPRECATED in version 2.0 of spec; retained here for compatibility. 
+type Review struct { + + // DEPRECATED in version 2.0 of spec + // 13.1: Reviewer + // Cardinality: optional, one + Reviewer string + // including AnnotatorType: one of "Person", "Organization" or "Tool" + ReviewerType string + + // DEPRECATED in version 2.0 of spec + // 13.2: Review Date: YYYY-MM-DDThh:mm:ssZ + // Cardinality: conditional (mandatory, one) if there is a Reviewer + ReviewDate string + + // DEPRECATED in version 2.0 of spec + // 13.3: Review Comment + // Cardinality: optional, one + ReviewComment string +} diff --git a/vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go b/vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go new file mode 100644 index 0000000000..d55a1a968f --- /dev/null +++ b/vendor/github.com/spdx/tools-golang/spdx/v2_3/snippet.go @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later + +package v2_3 + +import "github.com/spdx/tools-golang/spdx/common" + +// Snippet is a Snippet section of an SPDX Document for version 2.3 of the spec. +type Snippet struct { + + // 9.1: Snippet SPDX Identifier: "SPDXRef-[idstring]" + // Cardinality: mandatory, one + SnippetSPDXIdentifier common.ElementID `json:"SPDXID"` + + // 9.2: Snippet from File SPDX Identifier + // Cardinality: mandatory, one + SnippetFromFileSPDXIdentifier common.ElementID `json:"snippetFromFile"` + + // Ranges denotes the start/end byte offsets or line numbers that the snippet is relevant to + Ranges []common.SnippetRange `json:"ranges"` + + // 9.5: Snippet Concluded License: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one + SnippetLicenseConcluded string `json:"licenseConcluded,omitempty"` + + // 9.6: License Information in Snippet: SPDX License Expression, "NONE" or "NOASSERTION" + // Cardinality: optional, one or many + LicenseInfoInSnippet []string `json:"licenseInfoInSnippets,omitempty"` + + // 9.7: Snippet Comments on License + // Cardinality: optional, one + SnippetLicenseComments string `json:"licenseComments,omitempty"` + + // 9.8: Snippet Copyright Text: copyright notice(s) text, "NONE" or "NOASSERTION" + // Cardinality: mandatory, one + SnippetCopyrightText string `json:"copyrightText"` + + // 9.9: Snippet Comment + // Cardinality: optional, one + SnippetComment string `json:"comment,omitempty"` + + // 9.10: Snippet Name + // Cardinality: optional, one + SnippetName string `json:"name,omitempty"` + + // 9.11: Snippet Attribution Text + // Cardinality: optional, one or many + SnippetAttributionTexts []string `json:"-" yaml:"-"` +} diff --git a/vendor/github.com/tonistiigi/fsutil/Dockerfile b/vendor/github.com/tonistiigi/fsutil/Dockerfile index 8ea4b426e5..252b497638 100644 --- a/vendor/github.com/tonistiigi/fsutil/Dockerfile +++ b/vendor/github.com/tonistiigi/fsutil/Dockerfile @@ -1,29 +1,30 @@ -#syntax=docker/dockerfile:1.2 -ARG GO_VERSION=1.16 +#syntax=docker/dockerfile:1 +ARG GO_VERSION=1.18 -FROM --platform=amd64 tonistiigi/xx:golang AS goxx +FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.1.0 AS xx FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS base -RUN apk add --no-cache gcc musl-dev -COPY --from=goxx / / +RUN apk add --no-cache git +COPY --from=xx / / WORKDIR /src FROM base AS build ARG TARGETPLATFORM -RUN --mount=target=. \ +RUN --mount=target=. --mount=target=/go/pkg/mod,type=cache \ --mount=target=/root/.cache,type=cache \ - go build ./... + xx-go build ./... FROM base AS test -RUN --mount=target=. \ +ARG TESTFLAGS +RUN --mount=target=. 
--mount=target=/go/pkg/mod,type=cache \ --mount=target=/root/.cache,type=cache \ - go test -test.v ./... + CGO_ENABLED=0 xx-go test -test.v ${TESTFLAGS} ./... FROM base AS test-noroot RUN mkdir /go/pkg && chmod 0777 /go/pkg USER 1000:1000 RUN --mount=target=. \ --mount=target=/tmp/.cache,type=cache \ - GOCACHE=/tmp/gocache go test -test.v ./... + CGO_ENABLED=0 GOCACHE=/tmp/gocache xx-go test -test.v ./... FROM build diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go index 74f08a15ca..dd65a49ad1 100644 --- a/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go +++ b/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package fsutil diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy.go b/vendor/github.com/tonistiigi/fsutil/copy/copy.go index 41b82c32da..558c553f7c 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy.go @@ -2,7 +2,6 @@ package fs import ( "context" - "io/ioutil" "os" "path" "path/filepath" @@ -12,7 +11,7 @@ import ( "time" "github.com/containerd/continuity/fs" - "github.com/docker/docker/pkg/fileutils" + "github.com/moby/patternmatcher" "github.com/pkg/errors" "github.com/tonistiigi/fsutil" ) @@ -115,7 +114,7 @@ func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) e if err != nil { return err } - if err := c.copy(ctx, srcFollowed, "", dst, false, fileutils.MatchInfo{}, fileutils.MatchInfo{}); err != nil { + if err := c.copy(ctx, srcFollowed, "", dst, false, patternmatcher.MatchInfo{}, patternmatcher.MatchInfo{}); err != nil { return err } } @@ -154,6 +153,7 @@ func (c *copier) prepareTargetDir(srcFollowed, src, destPath string, copyDirCont type User struct { UID, GID int + SID string } type Chowner func(*User) (*User, error) @@ -232,8 +232,8 @@ type copier struct { mode *int inodes map[uint64]string xattrErrorHandler XAttrErrorHandler - includePatternMatcher *fileutils.PatternMatcher - excludePatternMatcher *fileutils.PatternMatcher + includePatternMatcher *patternmatcher.PatternMatcher + excludePatternMatcher *patternmatcher.PatternMatcher parentDirs []parentDir changefn fsutil.ChangeFunc root string @@ -252,19 +252,19 @@ func newCopier(root string, chown Chowner, tm *time.Time, mode *int, xeh XAttrEr } } - var includePatternMatcher *fileutils.PatternMatcher + var includePatternMatcher *patternmatcher.PatternMatcher if len(includePatterns) != 0 { var err error - includePatternMatcher, err = fileutils.NewPatternMatcher(includePatterns) + includePatternMatcher, err = patternmatcher.New(includePatterns) if err != nil { return nil, errors.Wrapf(err, "invalid includepatterns: %s", includePatterns) } } - var excludePatternMatcher *fileutils.PatternMatcher + var excludePatternMatcher *patternmatcher.PatternMatcher if len(excludePatterns) != 0 { var err error - excludePatternMatcher, err = fileutils.NewPatternMatcher(excludePatterns) + excludePatternMatcher, err = patternmatcher.New(excludePatterns) if err != nil { return nil, errors.Wrapf(err, "invalid excludepatterns: %s", excludePatterns) } @@ -284,7 +284,7 @@ func newCopier(root string, chown Chowner, tm *time.Time, mode *int, xeh XAttrEr } // dest is always clean -func (c *copier) copy(ctx context.Context, src, srcComponents, target string, overwriteTargetMetadata bool, parentIncludeMatchInfo, parentExcludeMatchInfo fileutils.MatchInfo) error { +func (c *copier) copy(ctx context.Context, src, srcComponents, target 
string, overwriteTargetMetadata bool, parentIncludeMatchInfo, parentExcludeMatchInfo patternmatcher.MatchInfo) error { select { case <-ctx.Done(): return ctx.Err() @@ -295,11 +295,15 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov if err != nil { return errors.Wrapf(err, "failed to stat %s", src) } + targetFi, err := os.Lstat(target) + if err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "failed to stat %s", target) + } include := true var ( - includeMatchInfo fileutils.MatchInfo - excludeMatchInfo fileutils.MatchInfo + includeMatchInfo patternmatcher.MatchInfo + excludeMatchInfo patternmatcher.MatchInfo ) if srcComponents != "" { matchesIncludePattern := false @@ -335,7 +339,8 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov } } - copyFileInfo := true + copyFileInfo := include + restoreFileTimestamp := false notify := true switch { @@ -345,8 +350,12 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov include, includeMatchInfo, excludeMatchInfo, ); err != nil { return err - } else if !overwriteTargetMetadata || c.includePatternMatcher != nil { + } else if !overwriteTargetMetadata { + // if we aren't supposed to overwrite existing target metadata, + // then we only need to copy the new file info if we newly created + // it, or restore the previous file timestamp if not copyFileInfo = created + restoreFileTimestamp = !created } notify = false case (fi.Mode() & os.ModeType) == 0: @@ -369,23 +378,26 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov if err := os.Symlink(link, target); err != nil { return errors.Wrapf(err, "failed to create symlink: %s", target) } - case (fi.Mode() & os.ModeDevice) == os.ModeDevice: + case (fi.Mode() & os.ModeDevice) == os.ModeDevice, + (fi.Mode() & os.ModeNamedPipe) == os.ModeNamedPipe, + (fi.Mode() & os.ModeSocket) == os.ModeSocket: if err := copyDevice(target, fi); err != nil { return errors.Wrapf(err, "failed to create device") } - default: - // TODO: Support pipes and sockets - return errors.Wrapf(err, "unsupported mode %s", fi.Mode()) } if copyFileInfo { - if err := c.copyFileInfo(fi, target); err != nil { + if err := c.copyFileInfo(fi, src, target); err != nil { return errors.Wrap(err, "failed to copy file info") } if err := copyXAttrs(target, src, c.xattrErrorHandler); err != nil { return errors.Wrap(err, "failed to copy xattrs") } + } else if restoreFileTimestamp && targetFi != nil { + if err := c.copyFileTimestamp(fi, target); err != nil { + return errors.Wrap(err, "failed to restore file timestamp") + } } if notify { if err := c.notifyChange(target, fi); err != nil { @@ -404,9 +416,9 @@ func (c *copier) notifyChange(target string, fi os.FileInfo) error { return nil } -func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo fileutils.MatchInfo) (bool, fileutils.MatchInfo, error) { +func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) { if c.includePatternMatcher == nil { - return true, fileutils.MatchInfo{}, nil + return true, patternmatcher.MatchInfo{}, nil } m, matchInfo, err := c.includePatternMatcher.MatchesUsingParentResults(path, parentIncludeMatchInfo) @@ -416,9 +428,9 @@ func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo fil return m, matchInfo, nil } -func (c *copier) exclude(path string, fi os.FileInfo, parentExcludeMatchInfo fileutils.MatchInfo) (bool,
fileutils.MatchInfo, error) { +func (c *copier) exclude(path string, fi os.FileInfo, parentExcludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) { if c.excludePatternMatcher == nil { - return false, fileutils.MatchInfo{}, nil + return false, patternmatcher.MatchInfo{}, nil } m, matchInfo, err := c.excludePatternMatcher.MatchesUsingParentResults(path, parentExcludeMatchInfo) @@ -449,7 +461,7 @@ func (c *copier) createParentDirs(src, srcComponents, target string, overwriteTa return err } if created { - if err := c.copyFileInfo(fi, parentDir.dstPath); err != nil { + if err := c.copyFileInfo(fi, parentDir.srcPath, parentDir.dstPath); err != nil { return errors.Wrap(err, "failed to copy file info") } @@ -471,8 +483,8 @@ func (c *copier) copyDirectory( stat os.FileInfo, overwriteTargetMetadata bool, include bool, - includeMatchInfo fileutils.MatchInfo, - excludeMatchInfo fileutils.MatchInfo, + includeMatchInfo patternmatcher.MatchInfo, + excludeMatchInfo patternmatcher.MatchInfo, ) (bool, error) { if !stat.IsDir() { return false, errors.Errorf("source is not directory") @@ -509,7 +521,7 @@ func (c *copier) copyDirectory( c.parentDirs = c.parentDirs[:len(c.parentDirs)-1] }() - fis, err := ioutil.ReadDir(src) + fis, err := os.ReadDir(src) if err != nil { return false, errors.Wrapf(err, "failed to read %s", src) } diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go index 0d8149693a..bc93b21ced 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_darwin.go @@ -1,3 +1,4 @@ +//go:build darwin // +build darwin package fs @@ -40,3 +41,7 @@ func copyFileContent(dst, src *os.File) error { return err } + +func mknod(dst string, mode uint32, rDev int) error { + return unix.Mknod(dst, uint32(mode), rDev) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_freebsd.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_freebsd.go index 297a2c0335..1b9dbb3d00 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_freebsd.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package fs @@ -7,6 +8,7 @@ import ( "os" "github.com/pkg/errors" + "golang.org/x/sys/unix" ) func copyFile(source, target string) error { @@ -30,3 +32,7 @@ func copyFileContent(dst, src *os.File) error { bufferPool.Put(buf) return err } + +func mknod(dst string, mode uint32, rDev int) error { + return unix.Mknod(dst, uint32(mode), uint64(rDev)) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go index 01878525cf..971cb5c5d4 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go @@ -15,9 +15,7 @@ func getUIDGID(fi os.FileInfo) (uid, gid int) { return int(st.Uid), int(st.Gid) } -func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { - st := fi.Sys().(*syscall.Stat_t) - +func (c *copier) copyFileInfo(fi os.FileInfo, src, name string) error { chown := c.chown uid, gid := getUIDGID(fi) old := &User{UID: uid, GID: gid} @@ -40,20 +38,26 @@ func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { } } - if c.utime != nil { - if err := Utimes(name, c.utime); err != nil { - return err - } - } else { - timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} - if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, 
timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrapf(err, "failed to utime %s", name) - } + if err := c.copyFileTimestamp(fi, name); err != nil { + return err } return nil } +func (c *copier) copyFileTimestamp(fi os.FileInfo, name string) error { + if c.utime != nil { + return Utimes(name, c.utime) + } + + st := fi.Sys().(*syscall.Stat_t) + timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + return nil +} + func copyFile(source, target string) error { src, err := os.Open(source) if err != nil { @@ -109,10 +113,6 @@ func copyFileContent(dst, src *os.File) error { return nil } -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +func mknod(dst string, mode uint32, rDev int) error { + return unix.Mknod(dst, uint32(mode), rDev) } diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go index cbd784e5f5..382fe201c1 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go @@ -1,8 +1,12 @@ +//go:build !windows // +build !windows package fs import ( + "os" + "syscall" + "github.com/pkg/errors" "github.com/containerd/continuity/sysx" @@ -26,3 +30,17 @@ func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { return nil } + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + var rDev int + if fi.Mode()&os.ModeDevice == os.ModeDevice || fi.Mode()&os.ModeCharDevice == os.ModeCharDevice { + rDev = int(st.Rdev) + } + mode := st.Mode + mode &^= syscall.S_IFSOCK // socket copied as stub + return mknod(dst, uint32(mode), rDev) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go index 22281ba5dd..945e96c5f2 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go @@ -16,8 +16,7 @@ func getUIDGID(fi os.FileInfo) (uid, gid int) { return int(st.Uid), int(st.Gid) } -func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { - st := fi.Sys().(*syscall.Stat_t) +func (c *copier) copyFileInfo(fi os.FileInfo, src, name string) error { chown := c.chown uid, gid := getUIDGID(fi) old := &User{UID: uid, GID: gid} @@ -40,15 +39,21 @@ func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { } } - if c.utime != nil { - if err := Utimes(name, c.utime); err != nil { - return err - } - } else { - timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} - if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrapf(err, "failed to utime %s", name) - } + if err := c.copyFileTimestamp(fi, name); err != nil { + return err + } + return nil +} + +func (c *copier) copyFileTimestamp(fi os.FileInfo, name string) error { + if c.utime != nil { + return Utimes(name, c.utime) + } + + st := fi.Sys().(*syscall.Stat_t) + timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, 
timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) } return nil } diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go index 330c0e3f2c..19a44a752f 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go @@ -4,14 +4,60 @@ import ( "io" "os" + "github.com/Microsoft/go-winio" "github.com/pkg/errors" + "golang.org/x/sys/windows" ) -func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { +const ( + seTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +func (c *copier) copyFileInfo(fi os.FileInfo, src, name string) error { if err := os.Chmod(name, fi.Mode()); err != nil { return errors.Wrapf(err, "failed to chmod %s", name) } + // Copy file ownership and ACL + // We need SeRestorePrivilege and SeTakeOwnershipPrivilege in order + // to restore security info on a file, especially if we're trying to + // apply security info which includes SIDs not necessarily present on + // the host. + privileges := []string{winio.SeRestorePrivilege, seTakeOwnershipPrivilege} + if err := winio.EnableProcessPrivileges(privileges); err != nil { + return err + } + defer winio.DisableProcessPrivileges(privileges) + + secInfo, err := windows.GetNamedSecurityInfo( + src, windows.SE_FILE_OBJECT, + windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION) + + if err != nil { + return err + } + + dacl, _, err := secInfo.DACL() + if err != nil { + return err + } + + sid, _, err := secInfo.Owner() + if err != nil { + return err + } + + if err := windows.SetNamedSecurityInfo( + name, windows.SE_FILE_OBJECT, + windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION, + sid, nil, dacl, nil); err != nil { + + return err + } + return nil +} + +func (c *copier) copyFileTimestamp(fi os.FileInfo, name string) error { // TODO: copy windows specific metadata return nil diff --git a/vendor/github.com/tonistiigi/fsutil/copy/device_darwin.go b/vendor/github.com/tonistiigi/fsutil/copy/device_darwin.go deleted file mode 100644 index 8a06d242a4..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/copy/device_darwin.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build darwin -// +build darwin - -package fs - -import ( - "os" - "syscall" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) -} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/device_freebsd.go b/vendor/github.com/tonistiigi/fsutil/copy/device_freebsd.go deleted file mode 100644 index 64a2fe4da3..0000000000 --- a/vendor/github.com/tonistiigi/fsutil/copy/device_freebsd.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build freebsd || solaris -// +build freebsd solaris - -package fs - -import ( - "os" - "syscall" - - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func copyDevice(dst string, fi os.FileInfo) error { - st, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return errors.New("unsupported stat type") - } - return unix.Mknod(dst, uint32(fi.Mode()), st.Rdev) -} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go index 3b825c940b..a02c5a5857 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go +++ 
b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package fs diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go index 9854754475..9553c08be3 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go @@ -4,26 +4,8 @@ import ( "os" "syscall" "time" - - "github.com/pkg/errors" ) -func Chown(p string, old *User, fn Chowner) error { - if fn == nil { - return nil - } - user, err := fn(old) - if err != nil { - return errors.WithStack(err) - } - if user != nil { - if err := os.Lchown(p, user.UID, user.GID); err != nil { - return err - } - } - return nil -} - // MkdirAll is forked os.MkdirAll func MkdirAll(path string, perm os.FileMode, user Chowner, tm *time.Time) error { // Fast path: if we can tell whether path is a directory or file, stop with success or error. diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go index 8fb0f6bc60..8bc5711bf0 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go @@ -1,8 +1,10 @@ +//go:build !windows // +build !windows package fs import ( + "os" "time" "github.com/pkg/errors" @@ -30,3 +32,19 @@ func Utimes(p string, tm *time.Time) error { return nil } + +func Chown(p string, old *User, fn Chowner) error { + if fn == nil { + return nil + } + user, err := fn(old) + if err != nil { + return errors.WithStack(err) + } + if user != nil { + if err := os.Lchown(p, user.UID, user.GID); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go index 6bd17e8133..6edb1f5f7f 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go @@ -1,10 +1,21 @@ +//go:build windows // +build windows package fs import ( + "fmt" "os" + "syscall" "time" + + "github.com/Microsoft/go-winio" + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +const ( + containerAdministratorSidString = "S-1-5-93-2-1" ) func fixRootDirectory(p string) string { @@ -19,3 +30,64 @@ func fixRootDirectory(p string) string { func Utimes(p string, tm *time.Time) error { return nil } + +func Chown(p string, old *User, fn Chowner) error { + if fn == nil { + return nil + } + user, err := fn(old) + if err != nil { + return errors.WithStack(err) + } + + userSIDstring := user.SID + if userSIDstring == "" { + userSIDstring = containerAdministratorSidString + + } + // Copy file ownership and ACL + // We need SeRestorePrivilege and SeTakeOwnershipPrivilege in order + // to restore security info on a file, especially if we're trying to + // apply security info which includes SIDs not necessarily present on + // the host. 
+ privileges := []string{winio.SeRestorePrivilege, seTakeOwnershipPrivilege} + if err := winio.EnableProcessPrivileges(privileges); err != nil { + return err + } + defer winio.DisableProcessPrivileges(privileges) + + sidPtr, err := syscall.UTF16PtrFromString(userSIDstring) + if err != nil { + return errors.Wrap(err, "converting to utf16 ptr") + } + var userSID *windows.SID + if err := windows.ConvertStringSidToSid(sidPtr, &userSID); err != nil { + return errors.Wrap(err, "converting to windows SID") + } + var dacl *windows.ACL + newEntries := []windows.EXPLICIT_ACCESS{ + { + AccessPermissions: windows.GENERIC_ALL, + AccessMode: windows.GRANT_ACCESS, + Inheritance: windows.SUB_CONTAINERS_AND_OBJECTS_INHERIT, + Trustee: windows.TRUSTEE{ + TrusteeForm: windows.TRUSTEE_IS_SID, + TrusteeValue: windows.TrusteeValueFromSID(userSID), + }, + }, + } + newAcl, err := windows.ACLFromEntries(newEntries, dacl) + if err != nil { + return fmt.Errorf("adding acls: %w", err) + } + + if err := windows.SetNamedSecurityInfo( + p, windows.SE_FILE_OBJECT, + windows.OWNER_SECURITY_INFORMATION|windows.DACL_SECURITY_INFORMATION, + userSID, nil, newAcl, nil); err != nil { + + return err + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go b/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go index 59accf054d..31ea3d9419 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/stat_sysv.go @@ -1,3 +1,4 @@ +//go:build dragonfly || linux || solaris // +build dragonfly linux solaris package fs diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter.go b/vendor/github.com/tonistiigi/fsutil/diskwriter.go index 786432264f..b822644ddc 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter.go @@ -4,6 +4,7 @@ import ( "context" "hash" "io" + gofs "io/fs" "os" "path/filepath" "strconv" @@ -33,10 +34,11 @@ type DiskWriter struct { opt DiskWriterOpt dest string - ctx context.Context - cancel func() - eg *errgroup.Group - filter FilterFunc + ctx context.Context + cancel func() + eg *errgroup.Group + filter FilterFunc + dirModTimes map[string]int64 } func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWriter, error) { @@ -51,17 +53,32 @@ func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWr eg, ctx := errgroup.WithContext(ctx) return &DiskWriter{ - opt: opt, - dest: dest, - eg: eg, - ctx: ctx, - cancel: cancel, - filter: opt.Filter, + opt: opt, + dest: dest, + eg: eg, + ctx: ctx, + cancel: cancel, + filter: opt.Filter, + dirModTimes: map[string]int64{}, }, nil } func (dw *DiskWriter) Wait(ctx context.Context) error { - return dw.eg.Wait() + if err := dw.eg.Wait(); err != nil { + return err + } + return filepath.WalkDir(dw.dest, func(path string, d gofs.DirEntry, prevErr error) error { + if prevErr != nil { + return prevErr + } + if !d.IsDir() { + return nil + } + if mtime, ok := dw.dirModTimes[path]; ok { + return chtimes(path, mtime) + } + return nil + }) } func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { @@ -147,6 +164,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er if err := os.Mkdir(newPath, fi.Mode()); err != nil { return errors.Wrapf(err, "failed to create dir %s", newPath) } + dw.dirModTimes[destPath] = statCopy.ModTime case fi.Mode()&os.ModeDevice != 0 || fi.Mode()&os.ModeNamedPipe != 0: if err := 
handleTarTypeBlockCharFifo(newPath, &statCopy); err != nil { return errors.Wrapf(err, "failed to create device %s", newPath) @@ -323,10 +341,6 @@ func (lfw *lazyFileWriter) Close() error { return err } -func mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} - // Random number state. // We generate random temporary file names so that there's a good // chance the file doesn't exist yet - keeps the number of tries in diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go index 6ca00618a1..ed6356fabe 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_freebsd.go @@ -1,3 +1,4 @@ +//go:build freebsd // +build freebsd package fsutil @@ -8,7 +9,9 @@ import ( ) func createSpecialFile(path string, mode uint32, stat *types.Stat) error { - dev := unix.Mkdev(uint32(stat.Devmajor), uint32(stat.Devminor)) - - return unix.Mknod(path, mode, dev) + return unix.Mknod(path, mode, mkdev(stat.Devmajor, stat.Devminor)) +} + +func mkdev(major int64, minor int64) uint64 { + return unix.Mkdev(uint32(major), uint32(minor)) } diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go index 36bb78895c..1d97d6f9d7 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package fsutil diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go index 9f55ad8832..927dba4602 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_unixnobsd.go @@ -1,13 +1,17 @@ +//go:build !windows && !freebsd // +build !windows,!freebsd package fsutil import ( - "syscall" - "github.com/tonistiigi/fsutil/types" + "golang.org/x/sys/unix" ) func createSpecialFile(path string, mode uint32, stat *types.Stat) error { - return syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))) + return unix.Mknod(path, mode, mkdev(stat.Devmajor, stat.Devminor)) +} + +func mkdev(major int64, minor int64) int { + return int(unix.Mkdev(uint32(major), uint32(minor))) } diff --git a/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl b/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl index 0d3c54172f..3d7d182c3c 100644 --- a/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl +++ b/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl @@ -1,5 +1,5 @@ variable "GO_VERSION" { - default = "1.16" + default = "1.18" } group "default" { @@ -63,5 +63,5 @@ target "shfmt" { target "cross" { inherits = ["build"] - platforms = ["linux/amd64", "linux/386", "linux/arm64", "linux/arm", "linux/ppc64le", "linux/s390x", "darwin/amd64", "darwin/arm64", "windows/amd64", "freebsd/amd64", "freebsd/arm64"] + platforms = ["linux/amd64", "linux/386", "linux/arm64", "linux/arm", "linux/ppc64le", "linux/s390x", "darwin/amd64", "darwin/arm64", "windows/amd64", "windows/arm64", "freebsd/amd64", "freebsd/arm64"] } diff --git a/vendor/github.com/tonistiigi/fsutil/followlinks.go b/vendor/github.com/tonistiigi/fsutil/followlinks.go index a0942413e8..136a908211 100644 --- a/vendor/github.com/tonistiigi/fsutil/followlinks.go +++ b/vendor/github.com/tonistiigi/fsutil/followlinks.go @@ -1,7 +1,6 @@ package fsutil import ( - 
"io/ioutil" "os" "path/filepath" "runtime" @@ -75,7 +74,7 @@ func (r *symlinkResolver) readSymlink(p string, allowWildcard bool) ([]string, e realPath := filepath.Join(r.root, p) base := filepath.Base(p) if allowWildcard && containsWildcards(base) { - fis, err := ioutil.ReadDir(filepath.Dir(realPath)) + fis, err := os.ReadDir(filepath.Dir(realPath)) if err != nil { if errors.Is(err, os.ErrNotExist) { return nil, nil diff --git a/vendor/github.com/tonistiigi/fsutil/fs.go b/vendor/github.com/tonistiigi/fsutil/fs.go index e26110b320..db587b77cd 100644 --- a/vendor/github.com/tonistiigi/fsutil/fs.go +++ b/vendor/github.com/tonistiigi/fsutil/fs.go @@ -3,7 +3,6 @@ package fsutil import ( "context" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -103,7 +102,7 @@ func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error { func (fs *subDirFS) Open(p string) (io.ReadCloser, error) { parts := strings.SplitN(filepath.Clean(p), string(filepath.Separator), 2) if len(parts) == 0 { - return ioutil.NopCloser(&emptyReader{}), nil + return io.NopCloser(&emptyReader{}), nil } d, ok := fs.m[parts[0]] if !ok { diff --git a/vendor/github.com/tonistiigi/fsutil/send.go b/vendor/github.com/tonistiigi/fsutil/send.go index 2c1a3801d5..f1c51b8365 100644 --- a/vendor/github.com/tonistiigi/fsutil/send.go +++ b/vendor/github.com/tonistiigi/fsutil/send.go @@ -135,7 +135,7 @@ func (s *sender) sendFile(h *sendHandle) error { defer f.Close() buf := bufPool.Get().(*[]byte) defer bufPool.Put(buf) - if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, f, *buf); err != nil { + if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, struct{ io.Reader }{f}, *buf); err != nil { return err } } diff --git a/vendor/github.com/tonistiigi/fsutil/stat_unix.go b/vendor/github.com/tonistiigi/fsutil/stat_unix.go index dd0ed45516..5923aefef1 100644 --- a/vendor/github.com/tonistiigi/fsutil/stat_unix.go +++ b/vendor/github.com/tonistiigi/fsutil/stat_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package fsutil diff --git a/vendor/github.com/tonistiigi/fsutil/walker.go b/vendor/github.com/tonistiigi/fsutil/walker.go index 83045bec85..f95101f319 100644 --- a/vendor/github.com/tonistiigi/fsutil/walker.go +++ b/vendor/github.com/tonistiigi/fsutil/walker.go @@ -8,7 +8,7 @@ import ( "syscall" "time" - "github.com/docker/docker/pkg/fileutils" + "github.com/moby/patternmatcher" "github.com/pkg/errors" "github.com/tonistiigi/fsutil/types" ) @@ -19,9 +19,29 @@ type WalkOpt struct { // FollowPaths contains symlinks that are resolved into include patterns // before performing the fs walk FollowPaths []string - Map FilterFunc + Map MapFunc } +type MapFunc func(string, *types.Stat) MapResult + +// The result of the walk function controls +// both how WalkDir continues and whether the path is kept. +type MapResult int + +const ( + // Keep the current path and continue. + MapResultKeep MapResult = iota + + // Exclude the current path and continue. + MapResultExclude + + // Exclude the current path, and skip the rest of the dir. + // If path is a dir, skip the current directory. + // If path is a file, skip the rest of the parent directory. + // (This matches the semantics of fs.SkipDir.) 
+ MapResultSkipDir +) + func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error { root, err := filepath.EvalSymlinks(p) if err != nil { @@ -37,8 +57,8 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err var ( includePatterns []string - includeMatcher *fileutils.PatternMatcher - excludeMatcher *fileutils.PatternMatcher + includeMatcher *patternmatcher.PatternMatcher + excludeMatcher *patternmatcher.PatternMatcher ) if opt != nil && opt.IncludePatterns != nil { @@ -63,7 +83,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err onlyPrefixIncludes := true if len(includePatterns) != 0 { - includeMatcher, err = fileutils.NewPatternMatcher(includePatterns) + includeMatcher, err = patternmatcher.New(includePatterns) if err != nil { return errors.Wrapf(err, "invalid includepatterns: %s", opt.IncludePatterns) } @@ -79,7 +99,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err onlyPrefixExcludeExceptions := true if opt != nil && opt.ExcludePatterns != nil { - excludeMatcher, err = fileutils.NewPatternMatcher(opt.ExcludePatterns) + excludeMatcher, err = patternmatcher.New(opt.ExcludePatterns) if err != nil { return errors.Wrapf(err, "invalid excludepatterns: %s", opt.ExcludePatterns) } @@ -97,8 +117,8 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err path string origpath string pathWithSep string - includeMatchInfo fileutils.MatchInfo - excludeMatchInfo fileutils.MatchInfo + includeMatchInfo patternmatcher.MatchInfo + excludeMatchInfo patternmatcher.MatchInfo calledFn bool } @@ -153,7 +173,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err skip := false if includeMatcher != nil { - var parentIncludeMatchInfo fileutils.MatchInfo + var parentIncludeMatchInfo patternmatcher.MatchInfo if len(parentDirs) != 0 { parentIncludeMatchInfo = parentDirs[len(parentDirs)-1].includeMatchInfo } @@ -188,7 +208,7 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err } if excludeMatcher != nil { - var parentExcludeMatchInfo fileutils.MatchInfo + var parentExcludeMatchInfo patternmatcher.MatchInfo if len(parentDirs) != 0 { parentExcludeMatchInfo = parentDirs[len(parentDirs)-1].excludeMatchInfo } @@ -258,7 +278,10 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err return ctx.Err() default: if opt != nil && opt.Map != nil { - if allowed := opt.Map(stat.Path, stat); !allowed { + result := opt.Map(stat.Path, stat) + if result == MapResultSkipDir { + return filepath.SkipDir + } else if result == MapResultExclude { return nil } } @@ -277,7 +300,8 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err default: } if opt != nil && opt.Map != nil { - if allowed := opt.Map(parentStat.Path, parentStat); !allowed { + result := opt.Map(parentStat.Path, parentStat) + if result == MapResultSkipDir || result == MapResultExclude { continue } } @@ -295,11 +319,11 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err }) } -func patternWithoutTrailingGlob(p *fileutils.Pattern) string { +func patternWithoutTrailingGlob(p *patternmatcher.Pattern) string { patStr := p.String() - // We use filepath.Separator here because fileutils.Pattern patterns + // We use filepath.Separator here because patternmatcher.Pattern patterns // get transformed to use the native path separator: - // 
https://github.com/moby/moby/blob/79651b7a979b40e26af353ad283ca7ea5d67a855/pkg/fileutils/fileutils.go#L54 + // https://github.com/moby/patternmatcher/blob/130b41bafc16209dc1b52a103fdac1decad04f1a/patternmatcher.go#L52 patStr = strings.TrimSuffix(patStr, string(filepath.Separator)+"**") patStr = strings.TrimSuffix(patStr, string(filepath.Separator)+"*") return patStr diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go new file mode 100644 index 0000000000..3d43f7aea9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go @@ -0,0 +1,150 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package retry provides request retry functionality that can perform +// configurable exponential backoff for transient errors and honor any +// explicit throttle responses received. +package retry // import "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + +import ( + "context" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" +) + +// DefaultConfig are the recommended defaults to use. +var DefaultConfig = Config{ + Enabled: true, + InitialInterval: 5 * time.Second, + MaxInterval: 30 * time.Second, + MaxElapsedTime: time.Minute, +} + +// Config defines configuration for retrying batches in case of export failure +// using an exponential backoff. +type Config struct { + // Enabled indicates whether to not retry sending batches in case of + // export failure. + Enabled bool + // InitialInterval the time to wait after the first failure before + // retrying. + InitialInterval time.Duration + // MaxInterval is the upper bound on backoff interval. Once this value is + // reached the delay between consecutive retries will always be + // `MaxInterval`. + MaxInterval time.Duration + // MaxElapsedTime is the maximum amount of time (including retries) spent + // trying to send a request/batch. Once this value is reached, the data + // is discarded. + MaxElapsedTime time.Duration +} + +// RequestFunc wraps a request with retry logic. 
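The retry package added above is internal to the OTLP exporters, so it cannot be imported from outside go.opentelemetry.io/otel/exporters/otlp; the sketch below only illustrates the Config semantics, with arbitrary small intervals:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	// Internal package: importable only from within the otlp exporter tree.
	"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
)

func main() {
	cfg := retry.Config{
		Enabled:         true,
		InitialInterval: 10 * time.Millisecond,
		MaxInterval:     50 * time.Millisecond,
		MaxElapsedTime:  time.Second,
	}
	// Illustrative evaluate func: every error is retryable, no throttle hint.
	send := cfg.RequestFunc(func(err error) (bool, time.Duration) {
		return true, 0
	})

	attempts := 0
	err := send(context.Background(), func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(attempts, err) // expect: 3 <nil>
}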
+type RequestFunc func(context.Context, func(context.Context) error) error + +// EvaluateFunc returns if an error is retry-able and if an explicit throttle +// duration should be honored that was included in the error. +// +// The function must return true if the error argument is retry-able, +// otherwise it must return false for the first return parameter. +// +// The function must return a non-zero time.Duration if the error contains +// explicit throttle duration that should be honored, otherwise it must return +// a zero valued time.Duration. +type EvaluateFunc func(error) (bool, time.Duration) + +// RequestFunc returns a RequestFunc using the evaluate function to determine +// if requests can be retried and based on the exponential backoff +// configuration of c. +func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + if !c.Enabled { + return func(ctx context.Context, fn func(context.Context) error) error { + return fn(ctx) + } + } + + // Do not use NewExponentialBackOff since it calls Reset and the code here + // must call Reset after changing the InitialInterval (this saves an + // unnecessary call to Now). + b := &backoff.ExponentialBackOff{ + InitialInterval: c.InitialInterval, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: c.MaxInterval, + MaxElapsedTime: c.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + b.Reset() + + return func(ctx context.Context, fn func(context.Context) error) error { + for { + err := fn(ctx) + if err == nil { + return nil + } + + retryable, throttle := evaluate(err) + if !retryable { + return err + } + + bOff := b.NextBackOff() + if bOff == backoff.Stop { + return fmt.Errorf("max retry time elapsed: %w", err) + } + + // Wait for the greater of the backoff or throttle delay. + var delay time.Duration + if bOff > throttle { + delay = bOff + } else { + elapsed := b.GetElapsedTime() + if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) + } + delay = throttle + } + + if err := waitFunc(ctx, delay); err != nil { + return err + } + } + } +} + +// Allow override for testing. +var waitFunc = wait + +func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() + + select { + case <-ctx.Done(): + // Handle the case where the timer and context deadline end + // simultaneously by prioritizing the timer expiration nil value + // response. + select { + case <-timer.C: + default: + return ctx.Err() + } + case <-timer.C: + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go new file mode 100644 index 0000000000..77f13a1937 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go @@ -0,0 +1,227 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "strconv" + "strings" + "time" + + "go.opentelemetry.io/otel" +) + +var DefaultEnvOptionsReader = EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: ioutil.ReadFile, +} + +func ApplyGRPCEnvConfigs(cfg Config) Config { + return DefaultEnvOptionsReader.ApplyGRPCEnvConfigs(cfg) +} + +func ApplyHTTPEnvConfigs(cfg Config) Config { + return DefaultEnvOptionsReader.ApplyHTTPEnvConfigs(cfg) +} + +type EnvOptionsReader struct { + GetEnv func(string) string + ReadFile func(filename string) ([]byte, error) +} + +func (e *EnvOptionsReader) ApplyHTTPEnvConfigs(cfg Config) Config { + opts := e.GetOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + return cfg +} + +func (e *EnvOptionsReader) ApplyGRPCEnvConfigs(cfg Config) Config { + opts := e.GetOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + return cfg +} + +func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption { + var opts []GenericOption + + // Endpoint + if v, ok := e.getEnvValue("TRACES_ENDPOINT"); ok { + u, err := url.Parse(v) + // Ignore invalid values. + if err == nil { + // This is used to set the scheme for OTLP/HTTP. + if insecureSchema(u.Scheme) { + opts = append(opts, WithInsecure()) + } else { + opts = append(opts, WithSecure()) + } + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For endpoint URLs for OTLP/HTTP per-signal variables, the + // URL MUST be used as-is without any modification. The only + // exception is that if an URL contains no path part, the root + // path / MUST be used. + path := u.Path + if path == "" { + path = "/" + } + cfg.Traces.URLPath = path + return cfg + }, func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Traces.Endpoint = path.Join(u.Host, u.Path) + return cfg + })) + } + } else if v, ok = e.getEnvValue("ENDPOINT"); ok { + u, err := url.Parse(v) + // Ignore invalid values. + if err == nil { + // This is used to set the scheme for OTLP/HTTP. + if insecureSchema(u.Scheme) { + opts = append(opts, WithInsecure()) + } else { + opts = append(opts, WithSecure()) + } + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. + cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) + return cfg + }, func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. 
+ cfg.Traces.Endpoint = path.Join(u.Host, u.Path) + return cfg + })) + } + } + + // Certificate File + if path, ok := e.getEnvValue("CERTIFICATE"); ok { + if tls, err := e.readTLSConfig(path); err == nil { + opts = append(opts, WithTLSClientConfig(tls)) + } else { + otel.Handle(fmt.Errorf("failed to configure otlp exporter certificate '%s': %w", path, err)) + } + } + if path, ok := e.getEnvValue("TRACES_CERTIFICATE"); ok { + if tls, err := e.readTLSConfig(path); err == nil { + opts = append(opts, WithTLSClientConfig(tls)) + } else { + otel.Handle(fmt.Errorf("failed to configure otlp traces exporter certificate '%s': %w", path, err)) + } + } + + // Headers + if h, ok := e.getEnvValue("HEADERS"); ok { + opts = append(opts, WithHeaders(stringToHeader(h))) + } + if h, ok := e.getEnvValue("TRACES_HEADERS"); ok { + opts = append(opts, WithHeaders(stringToHeader(h))) + } + + // Compression + if c, ok := e.getEnvValue("COMPRESSION"); ok { + opts = append(opts, WithCompression(stringToCompression(c))) + } + if c, ok := e.getEnvValue("TRACES_COMPRESSION"); ok { + opts = append(opts, WithCompression(stringToCompression(c))) + } + // Timeout + if t, ok := e.getEnvValue("TIMEOUT"); ok { + if d, err := strconv.Atoi(t); err == nil { + opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond)) + } + } + if t, ok := e.getEnvValue("TRACES_TIMEOUT"); ok { + if d, err := strconv.Atoi(t); err == nil { + opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond)) + } + } + + return opts +} + +func insecureSchema(schema string) bool { + switch strings.ToLower(schema) { + case "http", "unix": + return true + default: + return false + } +} + +// getEnvValue gets an OTLP environment variable value of the specified key using the GetEnv function. +// This function already prepends the OTLP prefix to all key lookup. +func (e *EnvOptionsReader) getEnvValue(key string) (string, bool) { + v := strings.TrimSpace(e.GetEnv(fmt.Sprintf("OTEL_EXPORTER_OTLP_%s", key))) + return v, v != "" +} + +func (e *EnvOptionsReader) readTLSConfig(path string) (*tls.Config, error) { + b, err := e.ReadFile(path) + if err != nil { + return nil, err + } + return CreateTLSConfig(b) +} + +func stringToCompression(value string) Compression { + switch value { + case "gzip": + return GzipCompression + } + + return NoCompression +} + +func stringToHeader(value string) map[string]string { + headersPairs := strings.Split(value, ",") + headers := make(map[string]string) + + for _, header := range headersPairs { + nameValue := strings.SplitN(header, "=", 2) + if len(nameValue) < 2 { + continue + } + name, err := url.QueryUnescape(nameValue[0]) + if err != nil { + continue + } + trimmedName := strings.TrimSpace(name) + value, err := url.QueryUnescape(nameValue[1]) + if err != nil { + continue + } + trimmedValue := strings.TrimSpace(value) + + headers[trimmedName] = trimmedValue + } + + return headers +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go new file mode 100644 index 0000000000..e6fb14e00e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go @@ -0,0 +1,292 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
+
+import (
+	"crypto/tls"
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/encoding/gzip"
+
+	"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
+)
+
+const (
+	// DefaultTracesPath is a default URL path for endpoint that
+	// receives spans.
+	DefaultTracesPath string = "/v1/traces"
+	// DefaultTimeout is a default max waiting time for the backend to process
+	// each span batch.
+	DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+	SignalConfig struct {
+		Endpoint    string
+		Insecure    bool
+		TLSCfg      *tls.Config
+		Headers     map[string]string
+		Compression Compression
+		Timeout     time.Duration
+		URLPath     string
+
+		// gRPC configurations
+		GRPCCredentials credentials.TransportCredentials
+	}
+
+	Config struct {
+		// Signal specific configurations
+		Traces SignalConfig
+
+		RetryConfig retry.Config
+
+		// gRPC configurations
+		ReconnectionPeriod time.Duration
+		ServiceConfig      string
+		DialOptions        []grpc.DialOption
+		GRPCConn           *grpc.ClientConn
+	}
+)
+
+func NewDefaultConfig() Config {
+	c := Config{
+		Traces: SignalConfig{
+			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorPort),
+			URLPath:     DefaultTracesPath,
+			Compression: NoCompression,
+			Timeout:     DefaultTimeout,
+		},
+		RetryConfig: retry.DefaultConfig,
+	}
+
+	return c
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+	cfg := NewDefaultConfig()
+	cfg = ApplyGRPCEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.ApplyGRPCOption(cfg)
+	}
+
+	if cfg.ServiceConfig != "" {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+	}
+	// Prioritize GRPCCredentials over Insecure (passing both is an error).
+	if cfg.Traces.GRPCCredentials != nil {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
+	} else if cfg.Traces.Insecure {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	} else {
+		// Default to using the host's root CA.
+		creds := credentials.NewTLS(nil)
+		cfg.Traces.GRPCCredentials = creds
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+	}
+	if cfg.Traces.Compression == GzipCompression {
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+	}
+	if cfg.ReconnectionPeriod != 0 {
+		p := grpc.ConnectParams{
+			Backoff:           backoff.DefaultConfig,
+			MinConnectTimeout: cfg.ReconnectionPeriod,
+		}
+		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+	}
+
+	return cfg
+}
+
+type (
+	// GenericOption applies an option to the HTTP or gRPC driver.
+ GenericOption interface { + ApplyHTTPOption(Config) Config + ApplyGRPCOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } + + // HTTPOption applies an option to the HTTP driver. + HTTPOption interface { + ApplyHTTPOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } + + // GRPCOption applies an option to the gRPC driver. + GRPCOption interface { + ApplyGRPCOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } +) + +// genericOption is an option that applies the same logic +// for both gRPC and HTTP. +type genericOption struct { + fn func(Config) Config +} + +func (g *genericOption) ApplyGRPCOption(cfg Config) Config { + return g.fn(cfg) +} + +func (g *genericOption) ApplyHTTPOption(cfg Config) Config { + return g.fn(cfg) +} + +func (genericOption) private() {} + +func newGenericOption(fn func(cfg Config) Config) GenericOption { + return &genericOption{fn: fn} +} + +// splitOption is an option that applies different logics +// for gRPC and HTTP. +type splitOption struct { + httpFn func(Config) Config + grpcFn func(Config) Config +} + +func (g *splitOption) ApplyGRPCOption(cfg Config) Config { + return g.grpcFn(cfg) +} + +func (g *splitOption) ApplyHTTPOption(cfg Config) Config { + return g.httpFn(cfg) +} + +func (splitOption) private() {} + +func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { + return &splitOption{httpFn: httpFn, grpcFn: grpcFn} +} + +// httpOption is an option that is only applied to the HTTP driver. +type httpOption struct { + fn func(Config) Config +} + +func (h *httpOption) ApplyHTTPOption(cfg Config) Config { + return h.fn(cfg) +} + +func (httpOption) private() {} + +func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { + return &httpOption{fn: fn} +} + +// grpcOption is an option that is only applied to the gRPC driver. 
+type grpcOption struct { + fn func(Config) Config +} + +func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { + return h.fn(cfg) +} + +func (grpcOption) private() {} + +func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { + return &grpcOption{fn: fn} +} + +// Generic Options + +func WithEndpoint(endpoint string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Endpoint = endpoint + return cfg + }) +} + +func WithCompression(compression Compression) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Compression = compression + return cfg + }) +} + +func WithURLPath(urlPath string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.URLPath = urlPath + return cfg + }) +} + +func WithRetry(rc retry.Config) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.RetryConfig = rc + return cfg + }) +} + +func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { + return newSplitOption(func(cfg Config) Config { + cfg.Traces.TLSCfg = tlsCfg.Clone() + return cfg + }, func(cfg Config) Config { + cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg) + return cfg + }) +} + +func WithInsecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Insecure = true + return cfg + }) +} + +func WithSecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Insecure = false + return cfg + }) +} + +func WithHeaders(headers map[string]string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Headers = headers + return cfg + }) +} + +func WithTimeout(duration time.Duration) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Timeout = duration + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go new file mode 100644 index 0000000000..f69e31095d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go @@ -0,0 +1,47 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" + +const ( + // DefaultCollectorPort is the port the Exporter will attempt connect to + // if no collector port is provided. + DefaultCollectorPort uint16 = 4317 + // DefaultCollectorHost is the host address the Exporter will attempt + // connect to if no collector address is provided. + DefaultCollectorHost string = "localhost" +) + +// Compression describes the compression used for payloads sent to the +// collector. +type Compression int + +const ( + // NoCompression tells the driver to send payloads without + // compression. 
+ NoCompression Compression = iota + // GzipCompression tells the driver to send payloads after + // compressing them with gzip. + GzipCompression +) + +// Marshaler describes the kind of message format sent to the collector +type Marshaler int + +const ( + // MarshalProto tells the driver to send using the protobuf binary format. + MarshalProto Marshaler = iota + // MarshalJSON tells the driver to send using json format. + MarshalJSON +) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go new file mode 100644 index 0000000000..7287cf6cfe --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/tls.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" +) + +// CreateTLSConfig creates a tls.Config from a raw certificate bytes +// to verify a server certificate. +func CreateTLSConfig(certBytes []byte) (*tls.Config, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + + return &tls.Config{ + RootCAs: cp, + }, nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go new file mode 100644 index 0000000000..d709ffa96e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -0,0 +1,275 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
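Before the vendored gRPC client below, it may help to see how the retry machinery from `internal/retry` (vendored at the top of this section) is actually driven. A minimal sketch, assuming the internal package were importable (internal packages are not reachable from outside the otel module); the flaky operation, its failure count, and the interval values are invented for illustration:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	// Hypothetical import: internal/retry is only usable within the otel module.
	"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
)

func main() {
	cfg := retry.Config{
		Enabled:         true,
		InitialInterval: 100 * time.Millisecond,
		MaxInterval:     time.Second,
		MaxElapsedTime:  5 * time.Second,
	}

	// Treat every error as retry-able with no explicit throttle delay.
	req := cfg.RequestFunc(func(err error) (bool, time.Duration) {
		return true, 0
	})

	attempts := 0
	err := req(context.Background(), func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // retried with backoff
		}
		return nil // succeeds on the third attempt
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```

With Enabled set to false, RequestFunc degenerates to a single attempt, which is why the exporters can pass a user-supplied retry.Config through unconditionally.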
+ +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "context" + "errors" + "sync" + "time" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +type client struct { + endpoint string + dialOpts []grpc.DialOption + metadata metadata.MD + exportTimeout time.Duration + requestFunc retry.RequestFunc + + // stopCtx is used as a parent context for all exports. Therefore, when it + // is canceled with the stopFunc all exports are canceled. + stopCtx context.Context + // stopFunc cancels stopCtx, stopping any active exports. + stopFunc context.CancelFunc + + // ourConn keeps track of where conn was created: true if created here on + // Start, or false if passed with an option. This is important on Shutdown + // as the conn should only be closed if created here on start. Otherwise, + // it is up to the processes that passed the conn to close it. + ourConn bool + conn *grpc.ClientConn + tscMu sync.RWMutex + tsc coltracepb.TraceServiceClient +} + +// Compile time check *client implements otlptrace.Client. +var _ otlptrace.Client = (*client)(nil) + +// NewClient creates a new gRPC trace client. +func NewClient(opts ...Option) otlptrace.Client { + return newClient(opts...) +} + +func newClient(opts ...Option) *client { + cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...) + + ctx, cancel := context.WithCancel(context.Background()) + + c := &client{ + endpoint: cfg.Traces.Endpoint, + exportTimeout: cfg.Traces.Timeout, + requestFunc: cfg.RetryConfig.RequestFunc(retryable), + dialOpts: cfg.DialOptions, + stopCtx: ctx, + stopFunc: cancel, + conn: cfg.GRPCConn, + } + + if len(cfg.Traces.Headers) > 0 { + c.metadata = metadata.New(cfg.Traces.Headers) + } + + return c +} + +// Start establishes a gRPC connection to the collector. +func (c *client) Start(ctx context.Context) error { + if c.conn == nil { + // If the caller did not provide a ClientConn when the client was + // created, create one using the configuration they did provide. + conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...) + if err != nil { + return err + } + // Keep track that we own the lifecycle of this conn and need to close + // it on Shutdown. + c.ourConn = true + c.conn = conn + } + + // The otlptrace.Client interface states this method is called just once, + // so no need to check if already started. + c.tscMu.Lock() + c.tsc = coltracepb.NewTraceServiceClient(c.conn) + c.tscMu.Unlock() + + return nil +} + +var errAlreadyStopped = errors.New("the client is already stopped") + +// Stop shuts down the client. +// +// Any active connections to a remote endpoint are closed if they were created +// by the client. Any gRPC connection passed during creation using +// WithGRPCConn will not be closed. It is the caller's responsibility to +// handle cleanup of that resource. +// +// This method synchronizes with the UploadTraces method of the client. It +// will wait for any active calls to that method to complete unimpeded, or it +// will cancel any active calls if ctx expires. 
If ctx expires, the context
+// error will be forwarded as the returned error. All client held resources
+// will still be released in this situation.
+//
+// If the client has already stopped, an error will be returned describing
+// this.
+func (c *client) Stop(ctx context.Context) error {
+	// Acquire the c.tscMu lock within the ctx lifetime.
+	acquired := make(chan struct{})
+	go func() {
+		c.tscMu.Lock()
+		close(acquired)
+	}()
+	var err error
+	select {
+	case <-ctx.Done():
+		// The Stop timeout is reached. Kill any remaining exports to force
+		// the clear of the lock and save the timeout error to return and
+		// signal the shutdown timed out before cleanly stopping.
+		c.stopFunc()
+		err = ctx.Err()
+
+		// To ensure the client is not left in a dirty state c.tsc needs to be
+		// set to nil. To avoid the race condition when doing this, ensure
+		// that all the exports are killed (initiated by c.stopFunc).
+		<-acquired
+	case <-acquired:
+	}
+	// Hold the tscMu lock for the rest of the function to ensure no new
+	// exports are started.
+	defer c.tscMu.Unlock()
+
+	// The otlptrace.Client interface states this method is called only
+	// once, but there is no guarantee it is called after Start. Ensure the
+	// client is started before doing anything and let the caller know if they
+	// made a mistake.
+	if c.tsc == nil {
+		return errAlreadyStopped
+	}
+
+	// Clear c.tsc to signal the client is stopped.
+	c.tsc = nil
+
+	if c.ourConn {
+		closeErr := c.conn.Close()
+		// A context timeout error takes precedence over this error.
+		if err == nil && closeErr != nil {
+			err = closeErr
+		}
+	}
+	return err
+}
+
+var errShutdown = errors.New("the client is shutdown")
+
+// UploadTraces sends a batch of spans.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+	// Hold a read lock to ensure a shut down initiated after this starts does
+	// not abandon the export. This read lock acquire has less priority than a
+	// write lock acquire (i.e. Stop), meaning if the client is shutting down
+	// this will come after the shut down.
+	c.tscMu.RLock()
+	defer c.tscMu.RUnlock()
+
+	if c.tsc == nil {
+		return errShutdown
+	}
+
+	ctx, cancel := c.exportContext(ctx)
+	defer cancel()
+
+	return c.requestFunc(ctx, func(iCtx context.Context) error {
+		_, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
+			ResourceSpans: protoSpans,
+		})
+		// nil is converted to OK.
+		if status.Code(err) == codes.OK {
+			// Success.
+			return nil
+		}
+		return err
+	})
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+	var (
+		ctx    context.Context
+		cancel context.CancelFunc
+	)
+
+	if c.exportTimeout > 0 {
+		ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+	} else {
+		ctx, cancel = context.WithCancel(parent)
+	}
+
+	if c.metadata.Len() > 0 {
+		ctx = metadata.NewOutgoingContext(ctx, c.metadata)
+	}
+
+	// Unify the client stopCtx with the parent.
+	go func() {
+		select {
+		case <-ctx.Done():
+		case <-c.stopCtx.Done():
+			// Cancel the export as the shutdown has timed out.
+			cancel()
+		}
+	}()
+
+	return ctx, cancel
+}
+
+// retryable returns if err identifies a request that can be retried and a
+// duration to wait for if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+	s := status.Convert(err)
+	switch s.Code() {
+	case codes.Canceled,
+		codes.DeadlineExceeded,
+		codes.ResourceExhausted,
+		codes.Aborted,
+		codes.OutOfRange,
+		codes.Unavailable,
+		codes.DataLoss:
+		return true, throttleDelay(s)
+	}
+
+	// Not a retry-able error.
+	return false, 0
+}
+
+// throttleDelay returns a duration to wait for if an explicit throttle time
+// is included in the response status.
+func throttleDelay(status *status.Status) time.Duration {
+	for _, detail := range status.Details() {
+		if t, ok := detail.(*errdetails.RetryInfo); ok {
+			return t.RetryDelay.AsDuration()
+		}
+	}
+	return 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
new file mode 100644
index 0000000000..89af41002f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
+	return otlptrace.New(ctx, NewClient(opts...))
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(opts ...Option) *otlptrace.Exporter {
+	return otlptrace.NewUnstarted(NewClient(opts...))
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
new file mode 100644
index 0000000000..e2e5bd696f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -0,0 +1,190 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
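The retryable and throttleDelay helpers in client.go above decide, from a gRPC status, whether an export should be retried and whether the server asked for an explicit delay. A short sketch of that classification using standard gRPC and protobuf APIs; the code, message, and two-second delay are invented for illustration:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// A server rejecting an export with RESOURCE_EXHAUSTED and an explicit
	// RetryInfo detail asking the client to wait two seconds.
	s, err := status.New(codes.ResourceExhausted, "quota exceeded").
		WithDetails(&errdetails.RetryInfo{RetryDelay: durationpb.New(2 * time.Second)})
	if err != nil {
		panic(err)
	}

	// Mirrors retryable(): RESOURCE_EXHAUSTED is in the retry-able set, and
	// throttleDelay() pulls the delay hint out of the status details.
	for _, detail := range s.Details() {
		if t, ok := detail.(*errdetails.RetryInfo); ok {
			fmt.Println(t.RetryDelay.AsDuration()) // 2s
		}
	}
}
```

The extracted throttle duration is then fed back into retry.RequestFunc, which waits for the greater of the backoff interval and the server-requested delay.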
+ +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" +) + +// Option applies an option to the gRPC driver. +type Option interface { + applyGRPCOption(otlpconfig.Config) otlpconfig.Config +} + +func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption { + converted := make([]otlpconfig.GRPCOption, len(opts)) + for i, o := range opts { + converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption) + } + return converted +} + +// RetryConfig defines configuration for retrying export of span batches that +// failed to be received by the target endpoint. +// +// This configuration does not define any network retry strategy. That is +// entirely handled by the gRPC ClientConn. +type RetryConfig retry.Config + +type wrappedOption struct { + otlpconfig.GRPCOption +} + +func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config { + return w.ApplyGRPCOption(cfg) +} + +// WithInsecure disables client transport security for the exporter's gRPC +// connection just like grpc.WithInsecure() +// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by +// default, client security is required unless WithInsecure is used. +// +// This option has no effect if WithGRPCConn is used. +func WithInsecure() Option { + return wrappedOption{otlpconfig.WithInsecure()} +} + +// WithEndpoint sets the target endpoint the exporter will connect to. If +// unset, localhost:4317 will be used as a default. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpoint(endpoint string) Option { + return wrappedOption{otlpconfig.WithEndpoint(endpoint)} +} + +// WithReconnectionPeriod set the minimum amount of time between connection +// attempts to the target endpoint. +// +// This option has no effect if WithGRPCConn is used. +func WithReconnectionPeriod(rp time.Duration) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.ReconnectionPeriod = rp + return cfg + })} +} + +func compressorToCompression(compressor string) otlpconfig.Compression { + switch compressor { + case "gzip": + return otlpconfig.GzipCompression + } + + otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor)) + return otlpconfig.NoCompression +} + +// WithCompressor sets the compressor for the gRPC client to use when sending +// requests. It is the responsibility of the caller to ensure that the +// compressor set has been registered with google.golang.org/grpc/encoding. +// This can be done by encoding.RegisterCompressor. Some compressors +// auto-register on import, such as gzip, which can be registered by calling +// `import _ "google.golang.org/grpc/encoding/gzip"`. +// +// This option has no effect if WithGRPCConn is used. +func WithCompressor(compressor string) Option { + return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))} +} + +// WithHeaders will send the provided headers with each gRPC requests. +func WithHeaders(headers map[string]string) Option { + return wrappedOption{otlpconfig.WithHeaders(headers)} +} + +// WithTLSCredentials allows the connection to use TLS credentials when +// talking to the server. 
It takes in grpc.TransportCredentials instead of say +// a Certificate file or a tls.Certificate, because the retrieving of these +// credentials can be done in many ways e.g. plain file, in code tls.Config or +// by certificate rotation, so it is up to the caller to decide what to use. +// +// This option has no effect if WithGRPCConn is used. +func WithTLSCredentials(creds credentials.TransportCredentials) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.Traces.GRPCCredentials = creds + return cfg + })} +} + +// WithServiceConfig defines the default gRPC service config used. +// +// This option has no effect if WithGRPCConn is used. +func WithServiceConfig(serviceConfig string) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.ServiceConfig = serviceConfig + return cfg + })} +} + +// WithDialOption sets explicit grpc.DialOptions to use when making a +// connection. The options here are appended to the internal grpc.DialOptions +// used so they will take precedence over any other internal grpc.DialOptions +// they might conflict with. +// +// This option has no effect if WithGRPCConn is used. +func WithDialOption(opts ...grpc.DialOption) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.DialOptions = opts + return cfg + })} +} + +// WithGRPCConn sets conn as the gRPC ClientConn used for all communication. +// +// This option takes precedence over any other option that relates to +// establishing or persisting a gRPC connection to a target endpoint. Any +// other option of those types passed will be ignored. +// +// It is the callers responsibility to close the passed conn. The client +// Shutdown method will not close this connection. +func WithGRPCConn(conn *grpc.ClientConn) Option { + return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { + cfg.GRPCConn = conn + return cfg + })} +} + +// WithTimeout sets the max amount of time a client will attempt to export a +// batch of spans. This takes precedence over any retry settings defined with +// WithRetry, once this time limit has been reached the export is abandoned +// and the batch of spans is dropped. +// +// If unset, the default timeout will be set to 10 seconds. +func WithTimeout(duration time.Duration) Option { + return wrappedOption{otlpconfig.WithTimeout(duration)} +} + +// WithRetry sets the retry policy for transient retryable errors that may be +// returned by the target endpoint when exporting a batch of spans. +// +// If the target endpoint responds with not only a retryable error, but +// explicitly returns a backoff time in the response. That time will take +// precedence over these settings. +// +// These settings do not define any network retry strategy. That is entirely +// handled by the gRPC ClientConn. +// +// If unset, the default retry policy will be used. It will retry the export +// 5 seconds after receiving a retryable error and increase exponentially +// after each error for no more than a total time of 1 minute. 
+func WithRetry(settings RetryConfig) Option { + return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))} +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go new file mode 100644 index 0000000000..81487f9b6f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -0,0 +1,324 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + +import ( + "bytes" + "compress/gzip" + "context" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +const contentTypeProto = "application/x-protobuf" + +var gzPool = sync.Pool{ + New: func() interface{} { + w := gzip.NewWriter(ioutil.Discard) + return w + }, +} + +// Keep it in sync with golang's DefaultTransport from net/http! We +// have our own copy to avoid handling a situation where the +// DefaultTransport is overwritten with some different implementation +// of http.RoundTripper or it's modified by other package. +var ourTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, +} + +type client struct { + name string + cfg otlpconfig.SignalConfig + generalCfg otlpconfig.Config + requestFunc retry.RequestFunc + client *http.Client + stopCh chan struct{} + stopOnce sync.Once +} + +var _ otlptrace.Client = (*client)(nil) + +// NewClient creates a new HTTP trace client. 
+func NewClient(opts ...Option) otlptrace.Client {
+	cfg := otlpconfig.NewDefaultConfig()
+	cfg = otlpconfig.ApplyHTTPEnvConfigs(cfg)
+	for _, opt := range opts {
+		cfg = opt.applyHTTPOption(cfg)
+	}
+
+	for pathPtr, defaultPath := range map[*string]string{
+		&cfg.Traces.URLPath: otlpconfig.DefaultTracesPath,
+	} {
+		tmp := strings.TrimSpace(*pathPtr)
+		if tmp == "" {
+			tmp = defaultPath
+		} else {
+			tmp = path.Clean(tmp)
+			if !path.IsAbs(tmp) {
+				tmp = fmt.Sprintf("/%s", tmp)
+			}
+		}
+		*pathPtr = tmp
+	}
+
+	httpClient := &http.Client{
+		Transport: ourTransport,
+		Timeout:   cfg.Traces.Timeout,
+	}
+	if cfg.Traces.TLSCfg != nil {
+		transport := ourTransport.Clone()
+		transport.TLSClientConfig = cfg.Traces.TLSCfg
+		httpClient.Transport = transport
+	}
+
+	stopCh := make(chan struct{})
+	return &client{
+		name:        "traces",
+		cfg:         cfg.Traces,
+		generalCfg:  cfg,
+		requestFunc: cfg.RetryConfig.RequestFunc(evaluate),
+		stopCh:      stopCh,
+		client:      httpClient,
+	}
+}
+
+// Start does nothing in an HTTP client.
+func (d *client) Start(ctx context.Context) error {
+	// nothing to do
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+	return nil
+}
+
+// Stop shuts down the client and interrupts any in-flight requests.
+func (d *client) Stop(ctx context.Context) error {
+	d.stopOnce.Do(func() {
+		close(d.stopCh)
+	})
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+	return nil
+}
+
+// UploadTraces sends a batch of spans to the collector.
+func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+	pbRequest := &coltracepb.ExportTraceServiceRequest{
+		ResourceSpans: protoSpans,
+	}
+	rawRequest, err := proto.Marshal(pbRequest)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := d.contextWithStop(ctx)
+	defer cancel()
+
+	request, err := d.newRequest(rawRequest)
+	if err != nil {
+		return err
+	}
+
+	return d.requestFunc(ctx, func(ctx context.Context) error {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		request.reset(ctx)
+		resp, err := d.client.Do(request.Request)
+		if err != nil {
+			return err
+		}
+
+		var rErr error
+		switch resp.StatusCode {
+		case http.StatusOK:
+			// Success, do not retry.
+		case http.StatusTooManyRequests,
+			http.StatusServiceUnavailable:
+			// Retry-able failure.
+			rErr = newResponseError(resp.Header)
+
+			// Going to retry, drain the body to reuse the connection.
+			if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+				_ = resp.Body.Close()
+				return err
+			}
+		default:
+			rErr = fmt.Errorf("failed to send %s to %s: %s", d.name, request.URL, resp.Status)
+		}
+
+		if err := resp.Body.Close(); err != nil {
+			return err
+		}
+		return rErr
+	})
+}
+
+func (d *client) newRequest(body []byte) (request, error) {
+	u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath}
+	r, err := http.NewRequest(http.MethodPost, u.String(), nil)
+	if err != nil {
+		return request{Request: r}, err
+	}
+
+	for k, v := range d.cfg.Headers {
+		r.Header.Set(k, v)
+	}
+	r.Header.Set("Content-Type", contentTypeProto)
+
+	req := request{Request: r}
+	switch Compression(d.cfg.Compression) {
+	case NoCompression:
+		r.ContentLength = (int64)(len(body))
+		req.bodyReader = bodyReader(body)
+	case GzipCompression:
+		// Ensure the content length is not used.
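+		// (A length of -1 marks the body size as unknown to net/http,
+		// which then streams the compressed payload instead of setting a
+		// Content-Length header; bodyReader below lets retries replay it.)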
+		r.ContentLength = -1
+		r.Header.Set("Content-Encoding", "gzip")
+
+		gz := gzPool.Get().(*gzip.Writer)
+		defer gzPool.Put(gz)
+
+		var b bytes.Buffer
+		gz.Reset(&b)
+
+		if _, err := gz.Write(body); err != nil {
+			return req, err
+		}
+		// Close needs to be called to ensure the body is fully written.
+		if err := gz.Close(); err != nil {
+			return req, err
+		}
+
+		req.bodyReader = bodyReader(b.Bytes())
+	}
+
+	return req, nil
+}
+
+// bodyReader returns a closure returning a new reader for buf.
+func bodyReader(buf []byte) func() io.ReadCloser {
+	return func() io.ReadCloser {
+		return ioutil.NopCloser(bytes.NewReader(buf))
+	}
+}
+
+// request wraps an http.Request with a resettable body reader.
+type request struct {
+	*http.Request
+
+	// bodyReader allows the same body to be used for multiple requests.
+	bodyReader func() io.ReadCloser
+}
+
+// reset reinitializes the request Body and uses ctx for the request.
+func (r *request) reset(ctx context.Context) {
+	r.Body = r.bodyReader()
+	r.Request = r.Request.WithContext(ctx)
+}
+
+// retryableError represents a request failure that can be retried.
+type retryableError struct {
+	throttle int64
+}
+
+// newResponseError returns a retryableError and will extract any explicit
+// throttle delay contained in headers.
+func newResponseError(header http.Header) error {
+	var rErr retryableError
+	if s, ok := header["Retry-After"]; ok {
+		if t, err := strconv.ParseInt(s[0], 10, 64); err == nil {
+			rErr.throttle = t
+		}
+	}
+	return rErr
+}
+
+func (e retryableError) Error() string {
+	return "retry-able request failure"
+}
+
+// evaluate returns whether err is retry-able. If it is, and it includes an
+// explicit throttling delay, that delay is also returned.
+func evaluate(err error) (bool, time.Duration) {
+	if err == nil {
+		return false, 0
+	}
+
+	rErr, ok := err.(retryableError)
+	if !ok {
+		return false, 0
+	}
+
+	return true, time.Duration(rErr.throttle)
+}
+
+func (d *client) getScheme() string {
+	if d.cfg.Insecure {
+		return "http"
+	}
+	return "https"
+}
+
+func (d *client) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) {
+	// Unify the parent context Done signal with the client's stop
+	// channel.
+	ctx, cancel := context.WithCancel(ctx)
+	go func(ctx context.Context, cancel context.CancelFunc) {
+		select {
+		case <-ctx.Done():
+			// Nothing to do, either cancelled or deadline
+			// happened.
+		case <-d.stopCh:
+			cancel()
+		}
+	}(ctx, cancel)
+	return ctx, cancel
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
new file mode 100644
index 0000000000..e7f066b43c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/doc.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otlptracehttp provides a client that sends traces to the collector
+using HTTP with binary protobuf payloads.
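+
+The typical entry point is New, which constructs and starts an exporter
+that can be registered with a trace SDK. A minimal wiring sketch (the
+sdktrace alias for go.opentelemetry.io/otel/sdk/trace is an assumed
+import):
+
+	exp, err := otlptracehttp.New(ctx)
+	if err != nil {
+		// handle error
+	}
+	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))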
+*/ +package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go new file mode 100644 index 0000000000..23b8642040 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/exporter.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + +import ( + "context" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" +) + +// New constructs a new Exporter and starts it. +func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) { + return otlptrace.New(ctx, NewClient(opts...)) +} + +// NewUnstarted constructs a new Exporter and does not start it. +func NewUnstarted(opts ...Option) *otlptrace.Exporter { + return otlptrace.NewUnstarted(NewClient(opts...)) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go new file mode 100644 index 0000000000..e550cfb5d5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + +import ( + "crypto/tls" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" +) + +// Compression describes the compression used for payloads sent to the +// collector. +type Compression otlpconfig.Compression + +const ( + // NoCompression tells the driver to send payloads without + // compression. + NoCompression = Compression(otlpconfig.NoCompression) + // GzipCompression tells the driver to send payloads after + // compressing them with gzip. + GzipCompression = Compression(otlpconfig.GzipCompression) +) + +// Option applies an option to the HTTP client. +type Option interface { + applyHTTPOption(otlpconfig.Config) otlpconfig.Config +} + +// RetryConfig defines configuration for retrying batches in case of export +// failure using an exponential backoff. 
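+//
+// A hedged sketch of a custom policy (the field names follow the vendored
+// retry.Config and should be treated as assumptions):
+//
+//	otlptracehttp.WithRetry(otlptracehttp.RetryConfig{
+//		Enabled:         true,
+//		InitialInterval: time.Second,
+//		MaxInterval:     30 * time.Second,
+//		MaxElapsedTime:  time.Minute,
+//	})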
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+	otlpconfig.HTTPOption
+}
+
+func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config {
+	return w.ApplyHTTPOption(cfg)
+}
+
+// WithEndpoint allows one to set the address of the collector
+// endpoint that the driver will use to send spans. If
+// unset, it will instead try to use
+// the default endpoint (localhost:4317). Note that the endpoint
+// must not contain any URL path.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithCompression tells the driver to compress the sent data.
+func WithCompression(compression Compression) Option {
+	return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))}
+}
+
+// WithURLPath allows one to override the default URL path used
+// for sending traces. If unset, default ("/v1/traces") will be used.
+func WithURLPath(urlPath string) Option {
+	return wrappedOption{otlpconfig.WithURLPath(urlPath)}
+}
+
+// WithTLSClientConfig can be used to set up a custom TLS
+// configuration for the client used to send payloads to the
+// collector. Use it if you want to use a custom certificate.
+func WithTLSClientConfig(tlsCfg *tls.Config) Option {
+	return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)}
+}
+
+// WithInsecure tells the driver to connect to the collector using the
+// HTTP scheme, instead of HTTPS.
+func WithInsecure() Option {
+	return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithHeaders allows one to tell the driver to send additional HTTP
+// headers with the payloads. Specifying headers like Content-Length,
+// Content-Encoding and Content-Type may result in a broken driver.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTimeout tells the driver the max waiting time for the backend to process
+// each batch of spans. If unset, the default will be 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry configures the retry policy for transient errors that may occur
+// when exporting traces. An exponential back-off algorithm is used to ensure
+// endpoints are not overwhelmed with retries. If unset, the default retry
+// policy will retry after 5 seconds and increase exponentially after each
+// error for a total of 1 minute.
+func WithRetry(rc RetryConfig) Option {
+	return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go
new file mode 100644
index 0000000000..104489e79f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/exporter.go
@@ -0,0 +1,85 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tracetest is a testing helper package for the SDK. Users can
+// configure no-op or in-memory exporters to verify different SDK behaviors or
+// custom instrumentation.
+package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/sdk/trace"
+)
+
+var _ trace.SpanExporter = (*NoopExporter)(nil)
+
+// NewNoopExporter returns a new no-op exporter.
+func NewNoopExporter() *NoopExporter {
+	return new(NoopExporter)
+}
+
+// NoopExporter is an exporter that drops all received spans and performs no
+// action.
+type NoopExporter struct{}
+
+// ExportSpans handles export of spans by dropping them.
+func (nsb *NoopExporter) ExportSpans(context.Context, []trace.ReadOnlySpan) error { return nil }
+
+// Shutdown stops the exporter by doing nothing.
+func (nsb *NoopExporter) Shutdown(context.Context) error { return nil }
+
+var _ trace.SpanExporter = (*InMemoryExporter)(nil)
+
+// NewInMemoryExporter returns a new InMemoryExporter.
+func NewInMemoryExporter() *InMemoryExporter {
+	return new(InMemoryExporter)
+}
+
+// InMemoryExporter is an exporter that stores all received spans in-memory.
+type InMemoryExporter struct {
+	mu sync.Mutex
+	ss SpanStubs
+}
+
+// ExportSpans handles export of spans by storing them in memory.
+func (imsb *InMemoryExporter) ExportSpans(_ context.Context, spans []trace.ReadOnlySpan) error {
+	imsb.mu.Lock()
+	defer imsb.mu.Unlock()
+	imsb.ss = append(imsb.ss, SpanStubsFromReadOnlySpans(spans)...)
+	return nil
+}
+
+// Shutdown stops the exporter by clearing spans held in memory.
+func (imsb *InMemoryExporter) Shutdown(context.Context) error {
+	imsb.Reset()
+	return nil
+}
+
+// Reset the current in-memory storage.
+func (imsb *InMemoryExporter) Reset() {
+	imsb.mu.Lock()
+	defer imsb.mu.Unlock()
+	imsb.ss = nil
+}
+
+// GetSpans returns the current in-memory stored spans.
+func (imsb *InMemoryExporter) GetSpans() SpanStubs {
+	imsb.mu.Lock()
+	defer imsb.mu.Unlock()
+	ret := make(SpanStubs, len(imsb.ss))
+	copy(ret, imsb.ss)
+	return ret
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go
new file mode 100644
index 0000000000..dcf32c148d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest"
+
+import (
+	"context"
+	"sync"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+// SpanRecorder records started and ended spans.
+type SpanRecorder struct {
+	startedMu sync.RWMutex
+	started   []sdktrace.ReadWriteSpan
+
+	endedMu sync.RWMutex
+	ended   []sdktrace.ReadOnlySpan
+}
+
+var _ sdktrace.SpanProcessor = (*SpanRecorder)(nil)
+
+// NewSpanRecorder returns a new initialized SpanRecorder.
+func NewSpanRecorder() *SpanRecorder {
+	return new(SpanRecorder)
+}
+
+// OnStart records started spans.
+//
+// This method is safe to be called concurrently.
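+//
+// The SDK calls OnStart for every span started under a provider the
+// recorder is registered on, e.g. (a sketch using the sdktrace alias
+// above):
+//
+//	sr := tracetest.NewSpanRecorder()
+//	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))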
+func (sr *SpanRecorder) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) { + sr.startedMu.Lock() + defer sr.startedMu.Unlock() + sr.started = append(sr.started, s) +} + +// OnEnd records completed spans. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) OnEnd(s sdktrace.ReadOnlySpan) { + sr.endedMu.Lock() + defer sr.endedMu.Unlock() + sr.ended = append(sr.ended, s) +} + +// Shutdown does nothing. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Shutdown(context.Context) error { + return nil +} + +// ForceFlush does nothing. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) ForceFlush(context.Context) error { + return nil +} + +// Started returns a copy of all started spans that have been recorded. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Started() []sdktrace.ReadWriteSpan { + sr.startedMu.RLock() + defer sr.startedMu.RUnlock() + dst := make([]sdktrace.ReadWriteSpan, len(sr.started)) + copy(dst, sr.started) + return dst +} + +// Ended returns a copy of all ended spans that have been recorded. +// +// This method is safe to be called concurrently. +func (sr *SpanRecorder) Ended() []sdktrace.ReadOnlySpan { + sr.endedMu.RLock() + defer sr.endedMu.RUnlock() + dst := make([]sdktrace.ReadOnlySpan, len(sr.ended)) + copy(dst, sr.ended) + return dst +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go new file mode 100644 index 0000000000..ece4633c52 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go @@ -0,0 +1,163 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracetest // import "go.opentelemetry.io/otel/sdk/trace/tracetest" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" +) + +type SpanStubs []SpanStub + +// SpanStubsFromReadOnlySpans returns SpanStubs populated from ro. +func SpanStubsFromReadOnlySpans(ro []tracesdk.ReadOnlySpan) SpanStubs { + if len(ro) == 0 { + return nil + } + + s := make(SpanStubs, 0, len(ro)) + for _, r := range ro { + s = append(s, SpanStubFromReadOnlySpan(r)) + } + + return s +} + +// Snapshots returns s as a slice of ReadOnlySpans. +func (s SpanStubs) Snapshots() []tracesdk.ReadOnlySpan { + if len(s) == 0 { + return nil + } + + ro := make([]tracesdk.ReadOnlySpan, len(s)) + for i := 0; i < len(s); i++ { + ro[i] = s[i].Snapshot() + } + return ro +} + +// SpanStub is a stand-in for a Span. 
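+//
+// In tests, stubs are usually obtained from an InMemoryExporter rather
+// than built by hand, e.g. (a sketch):
+//
+//	exp := tracetest.NewInMemoryExporter()
+//	stubs := exp.GetSpans()    // SpanStubs
+//	ro := stubs.Snapshots()    // back to []tracesdk.ReadOnlySpan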
+type SpanStub struct { + Name string + SpanContext trace.SpanContext + Parent trace.SpanContext + SpanKind trace.SpanKind + StartTime time.Time + EndTime time.Time + Attributes []attribute.KeyValue + Events []tracesdk.Event + Links []tracesdk.Link + Status tracesdk.Status + DroppedAttributes int + DroppedEvents int + DroppedLinks int + ChildSpanCount int + Resource *resource.Resource + InstrumentationLibrary instrumentation.Library +} + +// SpanStubFromReadOnlySpan returns a SpanStub populated from ro. +func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub { + if ro == nil { + return SpanStub{} + } + + return SpanStub{ + Name: ro.Name(), + SpanContext: ro.SpanContext(), + Parent: ro.Parent(), + SpanKind: ro.SpanKind(), + StartTime: ro.StartTime(), + EndTime: ro.EndTime(), + Attributes: ro.Attributes(), + Events: ro.Events(), + Links: ro.Links(), + Status: ro.Status(), + DroppedAttributes: ro.DroppedAttributes(), + DroppedEvents: ro.DroppedEvents(), + DroppedLinks: ro.DroppedLinks(), + ChildSpanCount: ro.ChildSpanCount(), + Resource: ro.Resource(), + InstrumentationLibrary: ro.InstrumentationLibrary(), + } +} + +// Snapshot returns a read-only copy of the SpanStub. +func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan { + return spanSnapshot{ + name: s.Name, + spanContext: s.SpanContext, + parent: s.Parent, + spanKind: s.SpanKind, + startTime: s.StartTime, + endTime: s.EndTime, + attributes: s.Attributes, + events: s.Events, + links: s.Links, + status: s.Status, + droppedAttributes: s.DroppedAttributes, + droppedEvents: s.DroppedEvents, + droppedLinks: s.DroppedLinks, + childSpanCount: s.ChildSpanCount, + resource: s.Resource, + instrumentationLibrary: s.InstrumentationLibrary, + } +} + +type spanSnapshot struct { + // Embed the interface to implement the private method. 
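+	// (ReadOnlySpan declares an unexported method so that only this SDK
+	// can satisfy it; embedding the interface provides that method while
+	// the getters below supply the real data.)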
+ tracesdk.ReadOnlySpan + + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []tracesdk.Event + links []tracesdk.Link + status tracesdk.Status + droppedAttributes int + droppedEvents int + droppedLinks int + childSpanCount int + resource *resource.Resource + instrumentationLibrary instrumentation.Library +} + +func (s spanSnapshot) Name() string { return s.name } +func (s spanSnapshot) SpanContext() trace.SpanContext { return s.spanContext } +func (s spanSnapshot) Parent() trace.SpanContext { return s.parent } +func (s spanSnapshot) SpanKind() trace.SpanKind { return s.spanKind } +func (s spanSnapshot) StartTime() time.Time { return s.startTime } +func (s spanSnapshot) EndTime() time.Time { return s.endTime } +func (s spanSnapshot) Attributes() []attribute.KeyValue { return s.attributes } +func (s spanSnapshot) Links() []tracesdk.Link { return s.links } +func (s spanSnapshot) Events() []tracesdk.Event { return s.events } +func (s spanSnapshot) Status() tracesdk.Status { return s.status } +func (s spanSnapshot) DroppedAttributes() int { return s.droppedAttributes } +func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks } +func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents } +func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount } +func (s spanSnapshot) Resource() *resource.Resource { return s.resource } +func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { + return s.instrumentationLibrary +} diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go new file mode 100644 index 0000000000..ce2f15ed28 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -0,0 +1,133 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gzip implements and registers the gzip compressor +// during the initialization. +// +// Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package gzip + +import ( + "compress/gzip" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "sync" + + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the gzip compressor. +const Name = "gzip" + +func init() { + c := &compressor{} + c.poolCompressor.New = func() interface{} { + return &writer{Writer: gzip.NewWriter(ioutil.Discard), pool: &c.poolCompressor} + } + encoding.RegisterCompressor(c) +} + +type writer struct { + *gzip.Writer + pool *sync.Pool +} + +// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported). +// NOTE: this function must only be called during initialization time (i.e. in an init() function), +// and is not thread-safe. +// +// The error returned will be nil if the specified level is valid. 
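+//
+// A caller-side sketch (imports aliased to avoid the name clash with
+// compress/gzip):
+//
+//	import (
+//		stdgzip "compress/gzip"
+//		grpcgzip "google.golang.org/grpc/encoding/gzip"
+//	)
+//
+//	func init() {
+//		if err := grpcgzip.SetLevel(stdgzip.BestCompression); err != nil {
+//			panic(err)
+//		}
+//	}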
+func SetLevel(level int) error { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return fmt.Errorf("grpc: invalid gzip compression level: %d", level) + } + c := encoding.GetCompressor(Name).(*compressor) + c.poolCompressor.New = func() interface{} { + w, err := gzip.NewWriterLevel(ioutil.Discard, level) + if err != nil { + panic(err) + } + return &writer{Writer: w, pool: &c.poolCompressor} + } + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + z := c.poolCompressor.Get().(*writer) + z.Writer.Reset(w) + return z, nil +} + +func (z *writer) Close() error { + defer z.pool.Put(z) + return z.Writer.Close() +} + +type reader struct { + *gzip.Reader + pool *sync.Pool +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + z, inPool := c.poolDecompressor.Get().(*reader) + if !inPool { + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil + } + if err := z.Reset(r); err != nil { + c.poolDecompressor.Put(z) + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (n int, err error) { + n, err = z.Reader.Read(p) + if err == io.EOF { + z.pool.Put(z) + } + return n, err +} + +// RFC1952 specifies that the last four bytes "contains the size of +// the original (uncompressed) input data modulo 2^32." +// gRPC has a max message size of 2GB so we don't need to worry about wraparound. +func (c *compressor) DecompressedSize(buf []byte) int { + last := len(buf) + if last < 4 { + return -1 + } + return int(binary.LittleEndian.Uint32(buf[last-4 : last])) +} + +func (c *compressor) Name() string { + return Name +} + +type compressor struct { + poolCompressor sync.Pool + poolDecompressor sync.Pool +} diff --git a/vendor/modules.txt b/vendor/modules.txt index cb0ee6a754..f0038410f7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -164,6 +164,9 @@ github.com/beorn7/perks/quantile # github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8 ## explicit; go 1.12 github.com/bsphere/le_go +# github.com/cenkalti/backoff/v4 v4.1.2 +## explicit; go 1.13 +github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.1.2 ## explicit; go 1.11 github.com/cespare/xxhash/v2 @@ -230,6 +233,7 @@ github.com/containerd/containerd/content github.com/containerd/containerd/content/local github.com/containerd/containerd/content/proxy github.com/containerd/containerd/contrib/nvidia +github.com/containerd/containerd/contrib/seccomp/kernelversion github.com/containerd/containerd/defaults github.com/containerd/containerd/diff github.com/containerd/containerd/diff/walking @@ -278,6 +282,7 @@ github.com/containerd/containerd/services/content/contentserver github.com/containerd/containerd/services/introspection github.com/containerd/containerd/services/server/config github.com/containerd/containerd/snapshots +github.com/containerd/containerd/snapshots/overlay/overlayutils github.com/containerd/containerd/snapshots/proxy github.com/containerd/containerd/sys github.com/containerd/containerd/sys/reaper @@ -294,10 +299,12 @@ github.com/containerd/fifo # github.com/containerd/go-runc v1.0.0 ## explicit; go 1.13 github.com/containerd/go-runc -# github.com/containerd/stargz-snapshotter v0.11.3 -## explicit; go 1.16 -github.com/containerd/stargz-snapshotter/snapshot/overlayutils -# github.com/containerd/stargz-snapshotter/estargz v0.11.3 +# github.com/containerd/nydus-snapshotter v0.3.1 +## explicit; go 1.17 
+github.com/containerd/nydus-snapshotter/pkg/converter +github.com/containerd/nydus-snapshotter/pkg/converter/tool +github.com/containerd/nydus-snapshotter/pkg/errdefs +# github.com/containerd/stargz-snapshotter/estargz v0.13.0 ## explicit; go 1.16 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil @@ -500,6 +507,12 @@ github.com/hashicorp/serf/serf # github.com/imdario/mergo v0.3.12 ## explicit; go 1.13 github.com/imdario/mergo +# github.com/in-toto/in-toto-golang v0.5.0 +## explicit; go 1.17 +github.com/in-toto/in-toto-golang/in_toto +github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common +github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1 +github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2 # github.com/inconshreveable/mousetrap v1.0.1 ## explicit; go 1.18 github.com/inconshreveable/mousetrap @@ -527,8 +540,8 @@ github.com/mistifyio/go-zfs # github.com/mitchellh/hashstructure/v2 v2.0.2 ## explicit; go 1.14 github.com/mitchellh/hashstructure/v2 -# github.com/moby/buildkit v0.10.6 -## explicit; go 1.17 +# github.com/moby/buildkit v0.11.2 +## explicit; go 1.18 github.com/moby/buildkit/api/services/control github.com/moby/buildkit/api/types github.com/moby/buildkit/cache @@ -547,16 +560,23 @@ github.com/moby/buildkit/client/connhelper github.com/moby/buildkit/client/llb github.com/moby/buildkit/client/llb/imagemetaresolver github.com/moby/buildkit/client/ociindex +github.com/moby/buildkit/cmd/buildkitd/config github.com/moby/buildkit/control github.com/moby/buildkit/control/gateway github.com/moby/buildkit/executor github.com/moby/buildkit/executor/oci github.com/moby/buildkit/executor/runcexecutor github.com/moby/buildkit/exporter +github.com/moby/buildkit/exporter/attestation +github.com/moby/buildkit/exporter/containerimage github.com/moby/buildkit/exporter/containerimage/exptypes +github.com/moby/buildkit/exporter/containerimage/image github.com/moby/buildkit/exporter/local github.com/moby/buildkit/exporter/tar +github.com/moby/buildkit/exporter/util/epoch github.com/moby/buildkit/frontend +github.com/moby/buildkit/frontend/attestations +github.com/moby/buildkit/frontend/attestations/sbom github.com/moby/buildkit/frontend/dockerfile/builder github.com/moby/buildkit/frontend/dockerfile/command github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb @@ -570,6 +590,8 @@ github.com/moby/buildkit/frontend/gateway/forwarder github.com/moby/buildkit/frontend/gateway/grpcclient github.com/moby/buildkit/frontend/gateway/pb github.com/moby/buildkit/frontend/subrequests +github.com/moby/buildkit/frontend/subrequests/outline +github.com/moby/buildkit/frontend/subrequests/targets github.com/moby/buildkit/identity github.com/moby/buildkit/session github.com/moby/buildkit/session/auth @@ -591,16 +613,23 @@ github.com/moby/buildkit/solver/llbsolver/file github.com/moby/buildkit/solver/llbsolver/mounts github.com/moby/buildkit/solver/llbsolver/ops github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes +github.com/moby/buildkit/solver/llbsolver/ops/opsutils +github.com/moby/buildkit/solver/llbsolver/proc +github.com/moby/buildkit/solver/llbsolver/provenance github.com/moby/buildkit/solver/pb +github.com/moby/buildkit/solver/result github.com/moby/buildkit/source github.com/moby/buildkit/source/git github.com/moby/buildkit/source/http github.com/moby/buildkit/source/local github.com/moby/buildkit/source/types +github.com/moby/buildkit/sourcepolicy +github.com/moby/buildkit/sourcepolicy/pb 
github.com/moby/buildkit/util/apicaps github.com/moby/buildkit/util/apicaps/pb github.com/moby/buildkit/util/appdefaults github.com/moby/buildkit/util/archutil +github.com/moby/buildkit/util/attestation github.com/moby/buildkit/util/bklog github.com/moby/buildkit/util/buildinfo github.com/moby/buildkit/util/buildinfo/types @@ -614,6 +643,7 @@ github.com/moby/buildkit/util/flightcontrol github.com/moby/buildkit/util/gitutil github.com/moby/buildkit/util/grpcerrors github.com/moby/buildkit/util/imageutil +github.com/moby/buildkit/util/iohelper github.com/moby/buildkit/util/leaseutil github.com/moby/buildkit/util/network github.com/moby/buildkit/util/overlay @@ -621,6 +651,7 @@ github.com/moby/buildkit/util/progress github.com/moby/buildkit/util/progress/controller github.com/moby/buildkit/util/progress/logs github.com/moby/buildkit/util/pull/pullprogress +github.com/moby/buildkit/util/purl github.com/moby/buildkit/util/push github.com/moby/buildkit/util/resolver github.com/moby/buildkit/util/resolver/config @@ -629,14 +660,17 @@ github.com/moby/buildkit/util/resolver/retryhandler github.com/moby/buildkit/util/rootless/specconv github.com/moby/buildkit/util/sshutil github.com/moby/buildkit/util/stack +github.com/moby/buildkit/util/staticfs github.com/moby/buildkit/util/suggest github.com/moby/buildkit/util/system github.com/moby/buildkit/util/throttle github.com/moby/buildkit/util/tracing +github.com/moby/buildkit/util/tracing/detect github.com/moby/buildkit/util/tracing/exec github.com/moby/buildkit/util/tracing/otlptracegrpc github.com/moby/buildkit/util/tracing/transform github.com/moby/buildkit/util/urlutil +github.com/moby/buildkit/util/wildcard github.com/moby/buildkit/util/winlayers github.com/moby/buildkit/version github.com/moby/buildkit/worker @@ -742,6 +776,10 @@ github.com/moby/term/windows # github.com/morikuni/aec v1.0.0 ## explicit github.com/morikuni/aec +# github.com/onsi/ginkgo v1.16.4 +## explicit; go 1.15 +# github.com/onsi/gomega v1.20.1 +## explicit; go 1.18 # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest @@ -767,6 +805,9 @@ github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalk github.com/opencontainers/selinux/pkg/pwalkdir +# github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 +## explicit; go 1.17 +github.com/package-url/packageurl-go # github.com/pelletier/go-toml v1.9.5 ## explicit; go 1.12 github.com/pelletier/go-toml @@ -802,9 +843,22 @@ github.com/rootless-containers/rootlesskit/pkg/port # github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 ## explicit github.com/sean-/seed +# github.com/secure-systems-lab/go-securesystemslib v0.4.0 +## explicit; go 1.17 +github.com/secure-systems-lab/go-securesystemslib/cjson +github.com/secure-systems-lab/go-securesystemslib/dsse +# github.com/shibumi/go-pathspec v1.3.0 +## explicit; go 1.17 +github.com/shibumi/go-pathspec # github.com/sirupsen/logrus v1.9.0 ## explicit; go 1.13 github.com/sirupsen/logrus +# github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f +## explicit; go 1.13 +github.com/spdx/tools-golang/json +github.com/spdx/tools-golang/spdx/common +github.com/spdx/tools-golang/spdx/v2_2 +github.com/spdx/tools-golang/spdx/v2_3 # github.com/spf13/cobra v1.6.1 ## explicit; go 1.15 github.com/spf13/cobra @@ -814,8 +868,8 @@ github.com/spf13/pflag # github.com/tinylib/msgp v1.1.6 ## explicit; go 1.14 github.com/tinylib/msgp/msgp -# 
github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3 -## explicit; go 1.13 +# github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa +## explicit; go 1.18 github.com/tonistiigi/fsutil github.com/tonistiigi/fsutil/copy github.com/tonistiigi/fsutil/types @@ -900,10 +954,20 @@ go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.7.0 +# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 +## explicit; go 1.16 +go.opentelemetry.io/otel/exporters/otlp/internal/retry # go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 ## explicit; go 1.16 go.opentelemetry.io/otel/exporters/otlp/otlptrace +go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 +## explicit; go 1.16 +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc +# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 +## explicit; go 1.16 +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp # go.opentelemetry.io/otel/internal/metric v0.27.0 ## explicit; go 1.16 go.opentelemetry.io/otel/internal/metric/global @@ -922,6 +986,7 @@ go.opentelemetry.io/otel/sdk/internal go.opentelemetry.io/otel/sdk/internal/env go.opentelemetry.io/otel/sdk/resource go.opentelemetry.io/otel/sdk/trace +go.opentelemetry.io/otel/sdk/trace/tracetest # go.opentelemetry.io/otel/trace v1.4.1 ## explicit; go 1.16 go.opentelemetry.io/otel/trace @@ -1094,6 +1159,7 @@ google.golang.org/grpc/credentials/google google.golang.org/grpc/credentials/insecure google.golang.org/grpc/credentials/oauth google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog google.golang.org/grpc/health