vendor: update buildkit to v0.11.2
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
parent 85169a04cf
commit 666334bd48

360 changed files with 35817 additions and 4041 deletions
.github/workflows/buildkit.yml (4 changes, vendored)
@@ -69,9 +69,7 @@ jobs:
       -
         name: BuildKit ref
         run: |
-          # FIXME(thaJeztah) temporarily overriding version to use for tests; remove with the next release of buildkit
-          # echo "BUILDKIT_REF=$(./hack/buildkit-ref)" >> $GITHUB_ENV
-          echo "BUILDKIT_REF=3a391492c9d0b7428b6dcaa18c5aa3b5951fdacd" >> $GITHUB_ENV
+          echo "BUILDKIT_REF=$(./hack/buildkit-ref)" >> $GITHUB_ENV
         working-directory: moby
       -
         name: Checkout BuildKit ${{ env.BUILDKIT_REF }}
@@ -26,6 +26,7 @@ import (
 	inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
 	localremotecache "github.com/moby/buildkit/cache/remotecache/local"
 	"github.com/moby/buildkit/client"
+	bkconfig "github.com/moby/buildkit/cmd/buildkitd/config"
 	"github.com/moby/buildkit/control"
 	"github.com/moby/buildkit/frontend"
 	dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder"
@@ -38,6 +39,7 @@ import (
 	"github.com/moby/buildkit/util/leaseutil"
 	"github.com/moby/buildkit/worker"
 	"github.com/pkg/errors"
+	"go.etcd.io/bbolt"
 	bolt "go.etcd.io/bbolt"
 )
@@ -157,6 +159,11 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		return nil, err
 	}
 
+	historyDB, err := bbolt.Open(filepath.Join(opt.Root, "history.db"), 0o600, nil)
+	if err != nil {
+		return nil, err
+	}
+
 	gcPolicy, err := getGCPolicy(opt.BuilderConfig, root)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not get builder GC policy")
@@ -189,6 +196,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		Transport: rt,
 		Layers:    layers,
 		Platforms: archutil.SupportedPlatforms(true),
+		LeaseManager: lm,
 	}
 
 	wc := &worker.Controller{}
@@ -203,6 +211,14 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		"gateway.v0": gateway.NewGatewayFrontend(wc),
 	}
 
+	var hconf *bkconfig.HistoryConfig
+	if opt.BuilderConfig.History != nil {
+		hconf = &bkconfig.HistoryConfig{
+			MaxAge:     opt.BuilderConfig.History.MaxAge,
+			MaxEntries: opt.BuilderConfig.History.MaxEntries,
+		}
+	}
+
 	return control.NewController(control.Opt{
 		SessionManager:   opt.SessionManager,
 		WorkerController: wc,
@@ -215,7 +231,11 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{
 			"inline": inlineremotecache.ResolveCacheExporterFunc(),
 		},
-		Entitlements: getEntitlements(opt.BuilderConfig),
+		Entitlements:  getEntitlements(opt.BuilderConfig),
+		LeaseManager:  lm,
+		ContentStore:  store,
+		HistoryDB:     historyDB,
+		HistoryConfig: hconf,
 	})
 }
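These additions wire BuildKit v0.11's build-history support into the builder-next controller: past builds are recorded in the bbolt-backed history.db and pruned per HistoryConfig. A minimal client-side sketch of reading those records back, assuming direct access to a BuildKit-style gRPC endpoint (the socket path is a placeholder; moby actually serves the controller over the session /grpc endpoint):

package main

import (
	"context"
	"fmt"
	"log"

	controlapi "github.com/moby/buildkit/api/services/control"
	bkclient "github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()
	c, err := bkclient.New(ctx, "unix:///run/buildkit/buildkitd.sock") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// EarlyExit drains the stored records and then closes the stream
	// instead of blocking for new build events.
	stream, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{EarlyExit: true})
	if err != nil {
		log.Fatal(err)
	}
	for {
		ev, err := stream.Recv()
		if err != nil {
			break // io.EOF once the stored records are exhausted
		}
		if ev.Record != nil {
			fmt.Println(ev.Record.Ref)
		}
	}
}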
@@ -4,6 +4,7 @@
 package buildkit
 
 import (
+	"context"
 	"os"
 	"path/filepath"
 	"strconv"
@@ -68,7 +69,7 @@ type bridgeProvider struct {
 	Root string
 }
 
-func (p *bridgeProvider) New() (network.Namespace, error) {
+func (p *bridgeProvider) New(ctx context.Context, hostname string) (network.Namespace, error) {
 	n, err := p.NetworkByName(networkName)
 	if err != nil {
 		return nil, err
@@ -82,6 +83,10 @@ func (p *bridgeProvider) New() (network.Namespace, error) {
 	return iface, nil
 }
 
+func (p *bridgeProvider) Close() error {
+	return nil
+}
+
 type lnInterface struct {
 	ep  *libnetwork.Endpoint
 	sbx *libnetwork.Sandbox
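The signature change and the new Close method track the network.Provider interface in the vendored BuildKit. Approximately, for orientation only (Namespace is stubbed out here):

package network

import (
	"context"
	"io"
)

// Namespace is elided; see github.com/moby/buildkit/util/network for the real type.
type Namespace interface{}

// Provider, roughly as of BuildKit v0.11: New now receives a context and the
// per-build hostname, and providers must be closable so workers can release
// network resources on shutdown.
type Provider interface {
	io.Closer
	New(ctx context.Context, hostname string) (Namespace, error)
}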
@@ -13,7 +13,6 @@ import (
 	"github.com/docker/docker/reference"
 	"github.com/moby/buildkit/exporter"
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
-	"github.com/moby/buildkit/util/compression"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 )
@@ -103,22 +102,18 @@ func (e *imageExporterInstance) Name() string {
 	return "exporting to image"
 }
 
-func (e *imageExporterInstance) Config() exporter.Config {
-	return exporter.Config{
-		Compression: compression.Config{
-			Type: compression.Default,
-		},
-	}
+func (e *imageExporterInstance) Config() *exporter.Config {
+	return exporter.NewConfig()
 }
 
-func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) {
+func (e *imageExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
 	if len(inp.Refs) > 1 {
-		return nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
+		return nil, nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
 	}
 
 	ref := inp.Ref
 	if ref != nil && len(inp.Refs) == 1 {
-		return nil, fmt.Errorf("invalid exporter input: Ref and Refs are mutually exclusive")
+		return nil, nil, fmt.Errorf("invalid exporter input: Ref and Refs are mutually exclusive")
 	}
 
 	// only one loop
@@ -137,14 +132,14 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
 	case 1:
 		platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey]
 		if !ok {
-			return nil, fmt.Errorf("cannot export image, missing platforms mapping")
+			return nil, nil, fmt.Errorf("cannot export image, missing platforms mapping")
 		}
 		var p exptypes.Platforms
 		if err := json.Unmarshal(platformsBytes, &p); err != nil {
-			return nil, errors.Wrapf(err, "failed to parse platforms passed to exporter")
+			return nil, nil, errors.Wrapf(err, "failed to parse platforms passed to exporter")
 		}
 		if len(p.Platforms) != len(inp.Refs) {
-			return nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs))
+			return nil, nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs))
 		}
 		config = inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.Platforms[0].ID)]
 		if v, ok := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p.Platforms[0].ID)]; ok {
@@ -157,16 +152,16 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
 	layersDone := oneOffProgress(ctx, "exporting layers")
 
 	if err := ref.Finalize(ctx); err != nil {
-		return nil, layersDone(err)
+		return nil, nil, layersDone(err)
 	}
 
 	if err := ref.Extract(ctx, nil); err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	diffIDs, err := e.opt.Differ.EnsureLayer(ctx, ref.ID())
 	if err != nil {
-		return nil, layersDone(err)
+		return nil, nil, layersDone(err)
 	}
 
 	diffs = make([]digest.Digest, len(diffIDs))
@@ -181,20 +176,20 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
 		var err error
 		config, err = emptyImageConfig()
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 	}
 
 	history, err := parseHistoryFromConfig(config)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	diffs, history = normalizeLayersAndHistory(diffs, history, ref)
 
 	config, err = patchImageConfig(config, diffs, history, inp.Metadata[exptypes.ExporterInlineCache], buildInfo)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	configDigest := digest.FromBytes(config)
@@ -202,7 +197,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
 	configDone := oneOffProgress(ctx, fmt.Sprintf("writing image %s", configDigest))
 	id, err := e.opt.ImageStore.Create(config)
 	if err != nil {
-		return nil, configDone(err)
+		return nil, nil, configDone(err)
 	}
 	_ = configDone(nil)
 
@@ -210,7 +205,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
 	for _, targetName := range e.targetNames {
 		tagDone := oneOffProgress(ctx, "naming to "+targetName.String())
 		if err := e.opt.ReferenceStore.AddTag(targetName, digest.Digest(id), true); err != nil {
-			return nil, tagDone(err)
+			return nil, nil, tagDone(err)
 		}
 		_ = tagDone(nil)
 	}
@@ -219,5 +214,5 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
 	return map[string]string{
 		exptypes.ExporterImageConfigDigestKey: configDigest.String(),
 		exptypes.ExporterImageDigestKey:       id.String(),
-	}, nil
+	}, nil, nil
 }
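The repeated nil insertions follow mechanically from the v0.11 exporter interface, which now returns a DescriptorReference next to the metadata map and takes Source by pointer. Roughly, with stub types standing in for the vendored ones:

package exporter

import "context"

// Stand-ins for the real types in github.com/moby/buildkit/exporter.
type (
	Config              struct{}
	Source              struct{}
	DescriptorReference interface{}
)

// Approximate v0.11 shape: Config() returns a pointer built via
// exporter.NewConfig(), which carries default compression settings, so the
// hand-rolled compression.Config above became unnecessary.
type ExporterInstance interface {
	Name() string
	Config() *Config
	Export(ctx context.Context, src *Source, sessionID string) (map[string]string, DescriptorReference, error)
}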
@@ -9,6 +9,7 @@ import (
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/leases"
 	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/rootfs"
 	"github.com/docker/docker/builder/builder-next/adapters/containerimage"
@@ -39,6 +40,7 @@ import (
 	"github.com/moby/buildkit/util/compression"
 	"github.com/moby/buildkit/util/contentutil"
 	"github.com/moby/buildkit/util/progress"
+	"github.com/moby/buildkit/version"
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
@@ -46,6 +48,10 @@ import (
 	"golang.org/x/sync/semaphore"
 )
 
+func init() {
+	version.Version = "v0.11.0-rc3"
+}
+
 const labelCreatedAt = "buildkit/createdat"
 
 // LayerAccess provides access to a moby layer from a snapshot
@@ -63,6 +69,7 @@ type Opt struct {
 	Snapshotter       snapshot.Snapshotter
 	ContentStore      content.Store
 	CacheManager      cache.Manager
+	LeaseManager      leases.Manager
 	ImageSource       *containerimage.Source
 	DownloadManager   *xfer.LayerDownloadManager
 	V2MetadataService distmetadata.V2MetadataService
@@ -157,17 +164,42 @@ func (w *Worker) GCPolicy() []client.PruneInfo {
 	return w.Opt.GCPolicy
 }
 
+// BuildkitVersion returns BuildKit version
+func (w *Worker) BuildkitVersion() client.BuildkitVersion {
+	return client.BuildkitVersion{
+		Package:  version.Package,
+		Version:  version.Version + "-moby",
+		Revision: version.Revision,
+	}
+}
+
+// Close closes the worker and releases all resources
+func (w *Worker) Close() error {
+	return nil
+}
+
+// ContentStore returns content store
+func (w *Worker) ContentStore() content.Store {
+	return w.Opt.ContentStore
+}
+
+// LeaseManager returns leases.Manager for the worker
+func (w *Worker) LeaseManager() leases.Manager {
+	return w.Opt.LeaseManager
+}
+
 // LoadRef loads a reference by ID
 func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) {
 	var opts []cache.RefOption
 	if hidden {
 		opts = append(opts, cache.NoUpdateLastUsed)
 	}
 	if id == "" {
 		// results can have nil refs if they are optimized out to be equal to scratch,
 		// i.e. Diff(A,A) == scratch
 		return nil, nil
 	}
 
 	return w.CacheManager().Get(ctx, id, nil, opts...)
 }
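The four new methods satisfy worker-interface additions in BuildKit v0.11. Note that init() pins the reported version string (v0.11.0-rc3 here) rather than deriving it from vendor.mod, and BuildkitVersion() tags it with a "-moby" suffix. That version is what clients see through the Info API added in v0.11; a minimal sketch, with a placeholder endpoint:

package main

import (
	"context"
	"fmt"
	"log"

	bkclient "github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()
	c, err := bkclient.New(ctx, "unix:///run/buildkit/buildkitd.sock") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	info, err := c.Info(ctx) // Info API is new in BuildKit v0.11
	if err != nil {
		log.Fatal(err)
	}
	// For this worker: version.Version plus the "-moby" suffix, e.g. "v0.11.0-rc3-moby".
	fmt.Println(info.BuildkitVersion.Package, info.BuildkitVersion.Version)
}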
@@ -60,6 +60,12 @@ type BuilderGCConfig struct {
 	DefaultKeepStorage string `json:",omitempty"`
 }
 
+// BuilderHistoryConfig contains history config for a buildkit builder
+type BuilderHistoryConfig struct {
+	MaxAge     int64 `json:",omitempty"`
+	MaxEntries int64 `json:",omitempty"`
+}
+
 // BuilderEntitlements contains settings to enable/disable entitlements
 type BuilderEntitlements struct {
 	NetworkHost *bool `json:"network-host,omitempty"`
@@ -68,6 +74,7 @@ type BuilderEntitlements struct {
 
 // BuilderConfig contains config for the builder
 type BuilderConfig struct {
-	GC           BuilderGCConfig     `json:",omitempty"`
-	Entitlements BuilderEntitlements `json:",omitempty"`
+	GC           BuilderGCConfig       `json:",omitempty"`
+	Entitlements BuilderEntitlements   `json:",omitempty"`
+	History      *BuilderHistoryConfig `json:",omitempty"`
 }
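dockerd unmarshals BuilderConfig from the "builder" key of daemon.json, so the new history limits should be settable there. A hypothetical example (field matching is case-insensitive in encoding/json; treating MaxAge as seconds follows BuildKit's history GC config at this version and is worth verifying):

{
  "builder": {
    "history": {
      "maxAge": 172800,
      "maxEntries": 50
    }
  }
}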
@@ -95,8 +95,8 @@ func testBuildWithSession(t *testing.T, client dclient.APIClient, daemonHost str
 	sess, err := session.NewSession(ctx, "foo1", "foo")
 	assert.Check(t, err)
 
-	fsProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{
-		{Dir: dir},
+	fsProvider := filesync.NewFSSyncProvider(filesync.StaticDirSource{
+		"": {Dir: dir},
 	})
 	sess.Allow(fsProvider)
 
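The filesync change mirrors BuildKit v0.11 / fsutil, where NewFSSyncProvider takes a StaticDirSource (a map of directory name to SyncedDir) instead of a slice; the empty-string key above stands in for the unnamed test dir. A fuller, hypothetical wiring with the names the dockerfile frontend conventionally requests:

// Sketch only: paths are placeholders.
fsProvider := filesync.NewFSSyncProvider(filesync.StaticDirSource{
	"context":    {Dir: "/path/to/build-context"},
	"dockerfile": {Dir: "/path/to/dockerfile-dir"},
})
sess.Allow(fsProvider)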
vendor.mod (19 changes)

@@ -56,7 +56,7 @@ require (
 	github.com/klauspost/compress v1.15.12
 	github.com/miekg/dns v1.1.43
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
-	github.com/moby/buildkit v0.10.6
+	github.com/moby/buildkit v0.11.2
 	github.com/moby/ipvs v1.1.0
 	github.com/moby/locker v1.0.1
 	github.com/moby/patternmatcher v0.5.0
@@ -81,7 +81,7 @@ require (
 	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.6.1
 	github.com/spf13/pflag v1.0.5
-	github.com/tonistiigi/fsutil v0.0.0-20221114235510-0127568185cf
+	github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa
 	github.com/tonistiigi/go-archvariant v1.0.0
 	github.com/vbatts/tar-split v0.11.2
 	github.com/vishvananda/netlink v1.2.1-beta.2
@@ -111,13 +111,14 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.2 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sts v1.16.16 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/cilium/ebpf v0.7.0 // indirect
 	github.com/container-storage-interface/spec v1.5.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/go-runc v1.0.0 // indirect
-	github.com/containerd/stargz-snapshotter v0.11.3 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.11.3 // indirect
+	github.com/containerd/nydus-snapshotter v0.3.1 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
 	github.com/containerd/ttrpc v1.1.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
@@ -141,14 +142,21 @@ require (
 	github.com/hashicorp/go-msgpack v0.5.5 // indirect
 	github.com/hashicorp/go-sockaddr v1.0.2 // indirect
 	github.com/hashicorp/golang-lru v0.5.4 // indirect
+	github.com/in-toto/in-toto-golang v0.5.0 // indirect
 	github.com/inconshreveable/mousetrap v1.0.1 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
 	github.com/onsi/ginkgo v1.16.4 // indirect
 	github.com/onsi/gomega v1.20.1 // indirect
+	github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 // indirect
 	github.com/philhofer/fwd v1.1.2 // indirect
 	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
+	github.com/shibumi/go-pathspec v1.3.0 // indirect
+	github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f // indirect
 	github.com/tinylib/msgp v1.1.6 // indirect
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.6 // indirect
@@ -160,7 +168,10 @@ require (
 	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect
 	go.opentelemetry.io/otel v1.4.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 // indirect
 	go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
 	go.opentelemetry.io/otel/metric v0.27.0 // indirect
 	go.opentelemetry.io/otel/sdk v1.4.1 // indirect
vendor.sum (185 changes)

@@ -1,5 +1,4 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -64,26 +63,21 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq
code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o=
code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20221118232415-3345c89a7c72 h1:kq78byqmxX6R9uk4uN3HD2F5tkZJAZMauuLSkNPS8to=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20221118232415-3345c89a7c72/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -98,7 +92,6 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
@@ -108,16 +101,12 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=
github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -136,7 +125,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs=
@@ -174,7 +162,6 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.16.16 h1:otZvq9r+xjPL7qU/luX2QdBamiN
github.com/aws/aws-sdk-go-v2/service/sts v1.16.16/go.mod h1:Y9iBgT1w2vHtYzJEkwD6FqILjDSsvbxcW/+wIYxyse4=
github.com/aws/smithy-go v1.13.1 h1:q09BdpUiaqpothcv393ACfWJJHzlzjB5HaNL1XHKmoQ=
github.com/aws/smithy-go v1.13.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -194,11 +181,11 @@ github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR
github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8 h1:fcONpniVVbh9+duVZYYbJuc+yGGdLRxTqpk7pTTz/qI=
github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8/go.mod h1:GrjfimWtH8h8EqJSfbO+sTQYV/fAjL/VN7dMeU8XP2Y=
github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo=
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
@@ -242,6 +229,7 @@ github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcK
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo=
github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
@@ -258,7 +246,6 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
@@ -276,15 +263,12 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
github.com/containerd/containerd v1.6.16 h1:0H5xH6ABsN7XTrxIAKxFpBkFCBtrZ/OSORhCpUnHjrc=
github.com/containerd/containerd v1.6.16/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -294,7 +278,6 @@ github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cE
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
@@ -306,8 +289,6 @@ github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -318,15 +299,14 @@ github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak
github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter v0.11.3 h1:D3PoF563XmOBdtfx2G6AkhbHueqwIVPBFn2mrsWLa3w=
github.com/containerd/stargz-snapshotter v0.11.3/go.mod h1:2j2EAUyvrLU4D9unYlTIwGhDKQIk74KJ9E71lJsQCVM=
github.com/containerd/nydus-snapshotter v0.3.1 h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk=
github.com/containerd/nydus-snapshotter v0.3.1/go.mod h1:+8R7NX7vrjlxAgtidnsstwIhpzyTlriYPssTxH++uiM=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M=
github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw=
github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -347,21 +327,17 @@ github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNR
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -389,7 +365,6 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -401,16 +376,13 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v20.10.13+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
@@ -446,10 +418,7 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o=
github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee h1:v6Eju/FhxsACGNipFEPBZZAzGr1F/jlRQr1qiBw2nEE=
@@ -457,7 +426,6 @@ github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee/go.mod h1:2H9hjfb
github.com/fluent/fluent-logger-golang v1.9.0 h1:zUdY44CHX2oIUc7VTNZc+4m+ORuO/mldQDA7czhWXEg=
github.com/fluent/fluent-logger-golang v1.9.0/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -465,7 +433,6 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -484,29 +451,23 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
@@ -594,7 +555,6 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -634,8 +594,6 @@ github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99
github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
@@ -657,7 +615,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hanwen/go-fuse/v2 v2.1.1-0.20220112183258-f57e95bda82d/go.mod h1:B1nGE/6RBFyBRC1RRnf23UpwCdyJ31eukw34oAKukAc=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -666,7 +623,6 @@ github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
@@ -681,7 +637,6 @@ github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHh
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
@@ -713,14 +668,14 @@ github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062 h1:G1+wBT0dwjIrBdLy0MIG0i+E4CQxEnedHXdauJEIH6g=
github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -728,7 +683,6 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -747,7 +701,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -763,7 +716,6 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -771,7 +723,6 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
@@ -779,7 +730,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
@@ -804,8 +754,8 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/buildkit v0.10.6 h1:DJlEuLIgnu34HQKF4n9Eg6q2YqQVC0eOpMb4p2eRS2w=
github.com/moby/buildkit v0.10.6/go.mod h1:tQuuyTWtOb9D+RE425cwOCUkX0/oZ+5iBZ+uWpWQ9bU=
github.com/moby/buildkit v0.11.2 h1:hNNsYuRssvFnp/qJ8FifStEUzROl5riPAEwk7cRzMjg=
github.com/moby/buildkit v0.11.2/go.mod h1:b5hR8j3BZaOj5+gf6yielP9YLT9mU92zy3zZtdoUTrw=
github.com/moby/ipvs v1.1.0 h1:ONN4pGaZQgAx+1Scz5RvWV4Q7Gb+mvfRh3NsPS+1XQQ=
github.com/moby/ipvs v1.1.0/go.mod h1:4VJMWuf098bsUMmZEiD4Tjk/O7mOn3l1PTD3s4OoYAs=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
@@ -814,7 +764,6 @@ github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M
github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/pubsub v1.0.0 h1:jkp/imWsmJz2f6LyFsk7EkVeN2HxR/HTTOY8kHrsxfA=
github.com/moby/pubsub v1.0.0/go.mod h1:bXSO+3h5MNXXCaEG+6/NlAIk7MMZbySZlnB+cUQhKKc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/swarmkit/v2 v2.0.0-20230119195359-904c221ac281 h1:E0LdO1cZEXmXrLoojCqEvVCk4cNLWSVotoDbWUmNa8g=
github.com/moby/swarmkit/v2 v2.0.0-20230119195359-904c221ac281/go.mod h1:jIgi55SqNJvlQ74bK35NXKWz6JCTexx5h69d0btP2AM=
github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
@@ -822,19 +771,16 @@ github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwK
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/mountinfo v0.6.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI=
github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc=
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU=
github.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -866,8 +812,6 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
|
@ -878,8 +822,8 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J
|
|||
github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
|
||||
github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU=
|
||||
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
|
||||
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
|
||||
github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo=
|
||||
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
|
@ -888,7 +832,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
|
|||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 h1:9iFHD5Kt9hkOfeawBNiEeEaV7bmC4/Z5wJp8E9BptMs=
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs=
|
||||
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
|
@ -897,7 +840,6 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h
|
|||
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
|
||||
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
|
||||
github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
|
||||
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
|
||||
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
|
||||
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
|
@ -915,13 +857,13 @@ github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuh
|
|||
github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4=
|
||||
github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 h1:DiLBVp4DAcZlBVBEtJpNWZpZVq0AEeCY7Hqk8URVs4o=
|
||||
github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170/go.mod h1:uQd4a7Rh3ZsVg5j0lNyAfyxIeGde9yrlhjF78GzeW0c=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
|
||||
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
|
@ -965,7 +907,6 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+
|
|||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
|
@ -990,22 +931,22 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rootless-containers/rootlesskit v1.1.0 h1:cRaRIYxY8oce4eE/zeAUZhgKu/4tU1p9YHN4+suwV7M=
github.com/rootless-containers/rootlesskit v1.1.0/go.mod h1:H+o9ndNe7tS91WqU0/+vpvc+VaCd7TCIWaJjnV0ujUo=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@ -1023,6 +964,9 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM=
github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f h1:9B623Cfs+mclYK6dsae7gLSwuIBHvlgmEup87qpqsAQ=
github.com/spdx/tools-golang v0.3.1-0.20230104082527-d6f58551be3f/go.mod h1:VHzvNsKAfAGqs4ZvwRL+7a0dNsL20s7lGui4K9C0xQM=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
@ -1042,7 +986,6 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@ -1065,14 +1008,13 @@ github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOM
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tonistiigi/fsutil v0.0.0-20221114235510-0127568185cf h1:2n2v98sRhXEG0Kh7+EvctaNIyOim36Ekp4pGDzbuvO8=
github.com/tonistiigi/fsutil v0.0.0-20221114235510-0127568185cf/go.mod h1:AvLEd1LEIl64G2Jpgwo7aVV5lGH0ePcKl0ygGIHNYl8=
github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa h1:XOFp/3aBXlqmOFAg3r6e0qQjPnK5I970LilqX+Is1W8=
github.com/tonistiigi/fsutil v0.0.0-20230105215944-fb433841cbfa/go.mod h1:AvLEd1LEIl64G2Jpgwo7aVV5lGH0ePcKl0ygGIHNYl8=
github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0=
github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho=
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
@ -1084,13 +1026,11 @@ github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaW
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.2 h1:Cn05BRLm+iRP/DZxyVSsfVyrzgjDbwHwkVt38qvXnNI=
github.com/vishvananda/netns v0.0.2/go.mod h1:yitZXdAVI+yPFSb4QUe+VW3vOVl4PZPNcBgbPxAtJxw=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
@ -1115,22 +1055,15 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU=
go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v2 v2.305.6/go.mod h1:BHha8XJGe8vCIBfWBpbBLVZ4QjOIlfoouvOwydu63E0=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk=
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
go.etcd.io/etcd/pkg/v3 v3.5.6 h1:k1GZrGrfMHy5/cg2bxNGsmLTFisatyhDYCFLRuaavWg=
go.etcd.io/etcd/pkg/v3 v3.5.6/go.mod h1:qATwUzDb6MLyGWq2nUj+jwXqZJcxkCuabh0P7Cuff3k=
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
go.etcd.io/etcd/raft/v3 v3.5.6 h1:tOmx6Ym6rn2GpZOrvTGJZciJHek6RnC3U/zNInzIN50=
go.etcd.io/etcd/raft/v3 v3.5.6/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0=
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
go.etcd.io/etcd/server/v3 v3.5.6 h1:RXuwaB8AMiV62TqcqIt4O4bG8NWjsxOkDJVT3MZI5Ds=
go.etcd.io/etcd/server/v3 v3.5.6/go.mod h1:6/Gfe8XTGXQJgLYQ65oGKMfPivb2EASLUSMSWN9Sroo=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
@ -1142,55 +1075,40 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 h1:Wjp9vsVSIEyvdiaECfqxY9xBqQ7JaSCGtvHgR4doXZk=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0/go.mod h1:vHItvsnJtp7ES++nFLLFBzUWny7fJQSvTlxFcqQGUr4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 h1:SLme4Porm+UwX0DdHMxlwRt7FzPSE0sys81bet2o0pU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU=
go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk=
go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g=
go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 h1:imIM3vRDMyZK1ypQlQlO+brE22I9lRhJsBDXpDWjlz8=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 h1:AxqDiGk8CorEXStMDZF5Hz9vo9Z7ZZ+I5m8JRl/ko40=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1/go.mod h1:c6E4V3/U+miqjs/8l950wggHGL1qzlp0Ypj9xoGrPqo=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 h1:8qOago/OqoFclMUUj/184tZyRdDZFpcejSjbk5Jrl6Y=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1:VwYo0Hak6Efuy0TXsZs8o1hnV3dHDPNtDbycG0hI8+M=
go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk=
go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
go.opentelemetry.io/otel/metric v0.27.0 h1:HhJPsGhJoKRSegPQILFbODU56NS/L1UE4fS1sC5kIwQ=
go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI=
go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
go.opentelemetry.io/otel/sdk v1.4.1 h1:J7EaW71E0v87qflB4cDolaqq3AcujGrtyIPGQoZOB0Y=
go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk=
go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE=
go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ=
go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c=
go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@ -1198,7 +1116,6 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
@ -1224,9 +1141,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE=
golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
@ -1316,14 +1231,10 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
@ -1424,7 +1335,6 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1432,7 +1342,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1468,10 +1377,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -1495,10 +1401,7 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1519,7 +1422,6 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@ -1545,7 +1447,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@ -1684,7 +1585,6 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@ -1770,7 +1670,6 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
@ -1804,7 +1703,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@ -1829,7 +1727,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@ -1848,56 +1745,35 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0=
k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
k8s.io/cri-api v0.24.0-alpha.3/go.mod h1:c/NLI5Zdyup5+oEYqFO2IE32ptofNiZpS1nL2y51gAg=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
resenje.org/singleflight v0.3.0 h1:USJtsAN6HTUA827ksc+2Kcr7QZ4HBq/z/P8ugVbqKFY=
resenje.org/singleflight v0.3.0/go.mod h1:lAgQK7VfjG6/pgredbQfmV0RvG/uVhKo6vSuZ0vCWfk=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
@ -1905,12 +1781,9 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
25 vendor/github.com/cenkalti/backoff/v4/.gitignore generated vendored Normal file
@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

# IDEs
.idea/
10 vendor/github.com/cenkalti/backoff/v4/.travis.yml generated vendored Normal file
@ -0,0 +1,10 @@
language: go
go:
  - 1.13
  - 1.x
  - tip
before_install:
  - go get github.com/mattn/goveralls
  - go get golang.org/x/tools/cmd/cover
script:
  - $HOME/gopath/bin/goveralls -service=travis-ci
20 vendor/github.com/cenkalti/backoff/v4/LICENSE generated vendored Normal file
@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2014 Cenk Altı

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
32 vendor/github.com/cenkalti/backoff/v4/README.md generated vendored Normal file
@ -0,0 +1,32 @@
# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls]

This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].

[Exponential backoff][exponential backoff wiki]
is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
in order to gradually find an acceptable rate.
The retries exponentially increase and stop increasing when a certain threshold is met.

## Usage

Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.

Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.

## Contributing

* I would like to keep this library as small as possible.
* Please don't send a PR without opening an issue and discussing it first.
* If proposed change is not a common use case, I will probably not accept it.

[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
[travis]: https://travis-ci.org/cenkalti/backoff
[travis image]: https://travis-ci.org/cenkalti/backoff.png?branch=master
[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master

[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff

[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
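
For context, a minimal usage sketch of the package vendored above (an editor's illustration, not part of this diff; the endpoint name is hypothetical):

package main

import (
    "log"
    "net/http"

    "github.com/cenkalti/backoff/v4"
)

func main() {
    // fetchStatus is a hypothetical operation; Retry re-runs it under the
    // default exponential policy (500ms initial interval, 1.5x multiplier,
    // 15min max elapsed time) until it succeeds or the policy gives up.
    fetchStatus := func() error {
        resp, err := http.Get("https://example.com/status") // placeholder endpoint
        if err != nil {
            return err
        }
        resp.Body.Close()
        return nil
    }
    if err := backoff.Retry(fetchStatus, backoff.NewExponentialBackOff()); err != nil {
        log.Fatal("gave up retrying: ", err)
    }
}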
66 vendor/github.com/cenkalti/backoff/v4/backoff.go generated vendored Normal file
@ -0,0 +1,66 @@
// Package backoff implements backoff algorithms for retrying operations.
//
// Use Retry function for retrying operations that may fail.
// If Retry does not meet your needs,
// copy/paste the function into your project and modify as you wish.
//
// There is also Ticker type similar to time.Ticker.
// You can use it if you need to work with channels.
//
// See Examples section below for usage examples.
package backoff

import "time"

// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
    // NextBackOff returns the duration to wait before retrying the operation,
    // or backoff.Stop to indicate that no more retries should be made.
    //
    // Example usage:
    //
    //     duration := backoff.NextBackOff()
    //     if duration == backoff.Stop {
    //         // Do not retry operation.
    //     } else {
    //         // Sleep for duration and retry operation.
    //     }
    //
    NextBackOff() time.Duration

    // Reset to initial state.
    Reset()
}

// Stop indicates that no more retries should be made for use in NextBackOff().
const Stop time.Duration = -1

// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
// meaning that the operation is retried immediately without waiting, indefinitely.
type ZeroBackOff struct{}

func (b *ZeroBackOff) Reset() {}

func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }

// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
// NextBackOff(), meaning that the operation should never be retried.
type StopBackOff struct{}

func (b *StopBackOff) Reset() {}

func (b *StopBackOff) NextBackOff() time.Duration { return Stop }

// ConstantBackOff is a backoff policy that always returns the same backoff delay.
// This is in contrast to an exponential backoff policy,
// which returns a delay that grows longer as you call NextBackOff() over and over again.
type ConstantBackOff struct {
    Interval time.Duration
}

func (b *ConstantBackOff) Reset()                     {}
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }

func NewConstantBackOff(d time.Duration) *ConstantBackOff {
    return &ConstantBackOff{Interval: d}
}
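
The BackOff interface above can also be driven by hand rather than through Retry; a short sketch (editor's illustration, not vendored code) that sleeps through a constant policy's intervals:

package main

import (
    "time"

    "github.com/cenkalti/backoff/v4"
)

func main() {
    // A ConstantBackOff never returns Stop on its own, so the caller
    // bounds the number of attempts itself.
    policy := backoff.NewConstantBackOff(100 * time.Millisecond)
    for attempt := 0; attempt < 3; attempt++ {
        if d := policy.NextBackOff(); d != backoff.Stop {
            time.Sleep(d)
        }
    }
}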
62 vendor/github.com/cenkalti/backoff/v4/context.go generated vendored Normal file
@ -0,0 +1,62 @@
package backoff

import (
    "context"
    "time"
)

// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface { // nolint: golint
    BackOff
    Context() context.Context
}

type backOffContext struct {
    BackOff
    ctx context.Context
}

// WithContext returns a BackOffContext with context ctx
//
// ctx must not be nil
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
    if ctx == nil {
        panic("nil context")
    }

    if b, ok := b.(*backOffContext); ok {
        return &backOffContext{
            BackOff: b.BackOff,
            ctx:     ctx,
        }
    }

    return &backOffContext{
        BackOff: b,
        ctx:     ctx,
    }
}

func getContext(b BackOff) context.Context {
    if cb, ok := b.(BackOffContext); ok {
        return cb.Context()
    }
    if tb, ok := b.(*backOffTries); ok {
        return getContext(tb.delegate)
    }
    return context.Background()
}

func (b *backOffContext) Context() context.Context {
    return b.ctx
}

func (b *backOffContext) NextBackOff() time.Duration {
    select {
    case <-b.ctx.Done():
        return Stop
    default:
        return b.BackOff.NextBackOff()
    }
}
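
WithContext above composes cancellation into any policy; a sketch (editor's illustration, not vendored code) of a retry loop that stops when either the policy is exhausted or a timeout fires:

package main

import (
    "context"
    "time"

    "github.com/cenkalti/backoff/v4"
)

func main() {
    // Bound all retries by a one-second deadline; NextBackOff returns
    // Stop as soon as the context is done, per the select above.
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    policy := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
    for {
        d := policy.NextBackOff()
        if d == backoff.Stop {
            break // context canceled or policy exhausted
        }
        time.Sleep(d)
    }
}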
158 vendor/github.com/cenkalti/backoff/v4/exponential.go generated vendored Normal file
|
@ -0,0 +1,158 @@
|
|||
package backoff

import (
	"math/rand"
	"time"
)

/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.

NextBackOff() is calculated using the following formula:

	randomized interval =
	    RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])

In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.

For example, given the following parameters:

	RetryInterval = 2
	RandomizationFactor = 0.5
	Multiplier = 2

the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.

Note: MaxInterval caps the RetryInterval and not the randomized interval.

If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.

The elapsed time can be reset by calling Reset().

Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:

	Request #  RetryInterval (seconds)  Randomized Interval (seconds)
	 1          0.5                     [0.25,   0.75]
	 2          0.75                    [0.375,  1.125]
	 3          1.125                   [0.562,  1.687]
	 4          1.687                   [0.8435, 2.53]
	 5          2.53                    [1.265,  3.795]
	 6          3.795                   [1.897,  5.692]
	 7          5.692                   [2.846,  8.538]
	 8          8.538                   [4.269, 12.807]
	 9         12.807                   [6.403, 19.210]
	10         19.210                   backoff.Stop

Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
	InitialInterval     time.Duration
	RandomizationFactor float64
	Multiplier          float64
	MaxInterval         time.Duration
	// After MaxElapsedTime the ExponentialBackOff returns Stop.
	// It never stops if MaxElapsedTime == 0.
	MaxElapsedTime time.Duration
	Stop           time.Duration
	Clock          Clock

	currentInterval time.Duration
	startTime       time.Time
}

// Clock is an interface that returns current time for BackOff.
type Clock interface {
	Now() time.Time
}

// Default values for ExponentialBackOff.
const (
	DefaultInitialInterval     = 500 * time.Millisecond
	DefaultRandomizationFactor = 0.5
	DefaultMultiplier          = 1.5
	DefaultMaxInterval         = 60 * time.Second
	DefaultMaxElapsedTime      = 15 * time.Minute
)

// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff() *ExponentialBackOff {
	b := &ExponentialBackOff{
		InitialInterval:     DefaultInitialInterval,
		RandomizationFactor: DefaultRandomizationFactor,
		Multiplier:          DefaultMultiplier,
		MaxInterval:         DefaultMaxInterval,
		MaxElapsedTime:      DefaultMaxElapsedTime,
		Stop:                Stop,
		Clock:               SystemClock,
	}
	b.Reset()
	return b
}

type systemClock struct{}

func (t systemClock) Now() time.Time {
	return time.Now()
}

// SystemClock implements Clock interface that uses time.Now().
var SystemClock = systemClock{}

// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
	b.currentInterval = b.InitialInterval
	b.startTime = b.Clock.Now()
}

// NextBackOff calculates the next backoff interval using the formula:
//
//	Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
	// Make sure we have not gone over the maximum elapsed time.
	elapsed := b.GetElapsedTime()
	next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
	b.incrementCurrentInterval()
	if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
		return b.Stop
	}
	return next
}

// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
// is created and is reset when Reset() is called.
//
// The elapsed time is computed using time.Now().UnixNano(). It is
// safe to call even while the backoff policy is used by a running
// ticker.
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
	return b.Clock.Now().Sub(b.startTime)
}

// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
	// Check for overflow, if overflow is detected set the current interval to the max interval.
	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
		b.currentInterval = b.MaxInterval
	} else {
		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
	}
}

// Returns a random value from the following interval:
//
//	[currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
	var delta = randomizationFactor * float64(currentInterval)
	var minInterval = float64(currentInterval) - delta
	var maxInterval = float64(currentInterval) + delta

	// Get a random value from the range [minInterval, maxInterval].
	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
	// we want a 33% chance for selecting either 1, 2 or 3.
	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
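To make the growth schedule in the doc comment concrete, here is a small sketch that draws a few randomized intervals from the default policy:

package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff()
	// With the defaults above, the first delay is drawn from
	// [0.25s, 0.75s], the second from [0.375s, 1.125s], and so on,
	// until the 15-minute MaxElapsedTime is exceeded and Stop is returned.
	for i := 0; i < 5; i++ {
		fmt.Printf("try %d: wait %v\n", i+1, b.NextBackOff())
	}
}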
112 vendor/github.com/cenkalti/backoff/v4/retry.go generated vendored Normal file
@@ -0,0 +1,112 @@
package backoff

import (
	"errors"
	"time"
)

// An Operation is executed by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error

// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy stated to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)

// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error {
	return RetryNotify(o, b, nil)
}

// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
	return RetryNotifyWithTimer(operation, b, notify, nil)
}

// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
	var err error
	var next time.Duration
	if t == nil {
		t = &defaultTimer{}
	}

	defer func() {
		t.Stop()
	}()

	ctx := getContext(b)

	b.Reset()
	for {
		if err = operation(); err == nil {
			return nil
		}

		var permanent *PermanentError
		if errors.As(err, &permanent) {
			return permanent.Err
		}

		if next = b.NextBackOff(); next == Stop {
			if cerr := ctx.Err(); cerr != nil {
				return cerr
			}

			return err
		}

		if notify != nil {
			notify(err, next)
		}

		t.Start(next)

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-t.C():
		}
	}
}

// PermanentError signals that the operation should not be retried.
type PermanentError struct {
	Err error
}

func (e *PermanentError) Error() string {
	return e.Err.Error()
}

func (e *PermanentError) Unwrap() error {
	return e.Err
}

func (e *PermanentError) Is(target error) bool {
	_, ok := target.(*PermanentError)
	return ok
}

// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
	if err == nil {
		return nil
	}
	return &PermanentError{
		Err: err,
	}
}
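A usage sketch of the retry loop above, including the Permanent escape hatch and a notify callback; the attempt counts are arbitrary:

package main

import (
	"errors"
	"fmt"
	"log"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // retried with backoff
		}
		if attempts > 10 {
			return backoff.Permanent(errors.New("giving up")) // never retried
		}
		return nil
	}

	notify := func(err error, next time.Duration) {
		log.Printf("retrying in %v after error: %v", next, err)
	}

	if err := backoff.RetryNotify(op, backoff.NewExponentialBackOff(), notify); err != nil {
		log.Fatal(err)
	}
	fmt.Println("succeeded after", attempts, "attempts")
}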
97 vendor/github.com/cenkalti/backoff/v4/ticker.go generated vendored Normal file
@@ -0,0 +1,97 @@
package backoff

import (
	"context"
	"sync"
	"time"
)

// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
//
// Ticks will continue to arrive when the previous operation is still running,
// so operations that take a while to fail could run in quick succession.
type Ticker struct {
	C        <-chan time.Time
	c        chan time.Time
	b        BackOff
	ctx      context.Context
	timer    Timer
	stop     chan struct{}
	stopOnce sync.Once
}

// NewTicker returns a new Ticker containing a channel that will send
// the time at times specified by the BackOff argument. Ticker is
// guaranteed to tick at least once. The channel is closed when Stop
// method is called or BackOff stops. It is not safe to manipulate the
// provided backoff policy (notably calling NextBackOff or Reset)
// while the ticker is running.
func NewTicker(b BackOff) *Ticker {
	return NewTickerWithTimer(b, &defaultTimer{})
}

// NewTickerWithTimer returns a new Ticker with a custom timer.
// A default timer that uses system timer is used when nil is passed.
func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
	if timer == nil {
		timer = &defaultTimer{}
	}
	c := make(chan time.Time)
	t := &Ticker{
		C:     c,
		c:     c,
		b:     b,
		ctx:   getContext(b),
		timer: timer,
		stop:  make(chan struct{}),
	}
	t.b.Reset()
	go t.run()
	return t
}

// Stop turns off a ticker. After Stop, no more ticks will be sent.
func (t *Ticker) Stop() {
	t.stopOnce.Do(func() { close(t.stop) })
}

func (t *Ticker) run() {
	c := t.c
	defer close(c)

	// Ticker is guaranteed to tick at least once.
	afterC := t.send(time.Now())

	for {
		if afterC == nil {
			return
		}

		select {
		case tick := <-afterC:
			afterC = t.send(tick)
		case <-t.stop:
			t.c = nil // Prevent future ticks from being sent to the channel.
			return
		case <-t.ctx.Done():
			return
		}
	}
}

func (t *Ticker) send(tick time.Time) <-chan time.Time {
	select {
	case t.c <- tick:
	case <-t.stop:
		return nil
	}

	next := t.b.NextBackOff()
	if next == Stop {
		t.Stop()
		return nil
	}

	t.timer.Start(next)
	return t.timer.C()
}
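A sketch of driving attempts from the Ticker above; WithMaxRetries bounds the run so the channel closes after a few ticks:

package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Each tick is a cue to try the operation again; the gap between
	// ticks grows according to the wrapped policy.
	ticker := backoff.NewTicker(backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5))
	defer ticker.Stop()

	for range ticker.C {
		if err := tryOnce(); err != nil {
			fmt.Println("attempt failed:", err)
			continue
		}
		fmt.Println("attempt succeeded")
		break
	}
}

func tryOnce() error { return errors.New("still failing") }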
35 vendor/github.com/cenkalti/backoff/v4/timer.go generated vendored Normal file
@@ -0,0 +1,35 @@
package backoff

import "time"

type Timer interface {
	Start(duration time.Duration)
	Stop()
	C() <-chan time.Time
}

// defaultTimer implements Timer interface using time.Timer
type defaultTimer struct {
	timer *time.Timer
}

// C returns the timers channel which receives the current time when the timer fires.
func (t *defaultTimer) C() <-chan time.Time {
	return t.timer.C
}

// Start starts the timer to fire after the given duration
func (t *defaultTimer) Start(duration time.Duration) {
	if t.timer == nil {
		t.timer = time.NewTimer(duration)
	} else {
		t.timer.Reset(duration)
	}
}

// Stop is called when the timer is not used anymore and resources may be freed.
func (t *defaultTimer) Stop() {
	if t.timer != nil {
		t.timer.Stop()
	}
}
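The Timer interface above exists so callers can substitute their own clock. A hypothetical fake timer that fires immediately, handy in tests (fakeTimer is not part of the library):

package main

import (
	"errors"
	"time"

	"github.com/cenkalti/backoff/v4"
)

// fakeTimer is a hypothetical Timer that fires right away, so retry
// loops can be exercised in tests without real sleeps.
type fakeTimer struct {
	c chan time.Time
}

func (t *fakeTimer) Start(d time.Duration) {
	t.c = make(chan time.Time, 1)
	t.c <- time.Now() // fire immediately instead of waiting d
}

func (t *fakeTimer) Stop()               {}
func (t *fakeTimer) C() <-chan time.Time { return t.c }

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient")
		}
		return nil
	}
	_ = backoff.RetryNotifyWithTimer(op, backoff.NewExponentialBackOff(), nil, &fakeTimer{})
}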
38 vendor/github.com/cenkalti/backoff/v4/tries.go generated vendored Normal file
@@ -0,0 +1,38 @@
package backoff

import "time"

/*
WithMaxRetries creates a wrapper around another BackOff, which will
return Stop if NextBackOff() has been called too many times since
the last time Reset() was called

Note: Implementation is not thread-safe.
*/
func WithMaxRetries(b BackOff, max uint64) BackOff {
	return &backOffTries{delegate: b, maxTries: max}
}

type backOffTries struct {
	delegate BackOff
	maxTries uint64
	numTries uint64
}

func (b *backOffTries) NextBackOff() time.Duration {
	if b.maxTries == 0 {
		return Stop
	}
	if b.maxTries > 0 {
		if b.maxTries <= b.numTries {
			return Stop
		}
		b.numTries++
	}
	return b.delegate.NextBackOff()
}

func (b *backOffTries) Reset() {
	b.numTries = 0
	b.delegate.Reset()
}
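A usage sketch of the retry-count cap above; the budget of 3 retries is an arbitrary choice:

package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Allow at most 3 retries, regardless of how much MaxElapsedTime
	// the wrapped policy would otherwise permit.
	b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 3)

	err := backoff.Retry(func() error {
		return errors.New("always failing")
	}, b)
	fmt.Println(err) // the last attempt's error once the budget is spent
}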
92 vendor/github.com/containerd/containerd/contrib/seccomp/kernelversion/kernel_linux.go generated vendored Normal file
@@ -0,0 +1,92 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

   File copied and customized based on
   https://github.com/moby/moby/tree/v20.10.14/profiles/seccomp/kernel_linux.go
*/

package kernelversion

import (
	"bytes"
	"fmt"
	"sync"

	"golang.org/x/sys/unix"
)

// KernelVersion holds information about the kernel.
type KernelVersion struct {
	Kernel uint64 // Version of the Kernel (i.e., the "4" in "4.1.2-generic")
	Major  uint64 // Major revision of the Kernel (i.e., the "1" in "4.1.2-generic")
}

// String implements fmt.Stringer for KernelVersion
func (k *KernelVersion) String() string {
	if k.Kernel > 0 || k.Major > 0 {
		return fmt.Sprintf("%d.%d", k.Kernel, k.Major)
	}
	return ""
}

var (
	currentKernelVersion *KernelVersion
	kernelVersionError   error
	once                 sync.Once
)

// getKernelVersion gets the current kernel version.
func getKernelVersion() (*KernelVersion, error) {
	once.Do(func() {
		var uts unix.Utsname
		if err := unix.Uname(&uts); err != nil {
			return
		}
		// Remove the \x00 from the release for Atoi to parse correctly
		currentKernelVersion, kernelVersionError = parseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)]))
	})
	return currentKernelVersion, kernelVersionError
}

// parseRelease parses a string and creates a KernelVersion based on it.
func parseRelease(release string) (*KernelVersion, error) {
	var version = KernelVersion{}

	// We only need the "kernel" and "major revision" fields. Sometimes we have
	// 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64.
	_, err := fmt.Sscanf(release, "%d.%d", &version.Kernel, &version.Major)
	if err != nil {
		return nil, fmt.Errorf("failed to parse kernel version %q: %w", release, err)
	}
	return &version, nil
}

// GreaterEqualThan checks if the host's kernel version is greater than, or
// equal to the given kernel version v. Only "kernel version" and "major revision"
// can be specified (e.g., "3.12") and will be taken into account, which means
// that 3.12.25-gentoo and 3.12-1-amd64 are considered equal (kernel: 3, major: 12).
func GreaterEqualThan(minVersion KernelVersion) (bool, error) {
	kv, err := getKernelVersion()
	if err != nil {
		return false, err
	}
	if kv.Kernel > minVersion.Kernel {
		return true, nil
	}
	if kv.Kernel == minVersion.Kernel && kv.Major >= minVersion.Major {
		return true, nil
	}
	return false, nil
}
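A sketch of the typical call site for GreaterEqualThan, probing for the kernel-5.11 userxattr support used later in this diff:

package main

import (
	"fmt"
	"log"

	kernel "github.com/containerd/containerd/contrib/seccomp/kernelversion"
)

func main() {
	// Does the running kernel support the "userxattr" overlayfs mount
	// option (added in 5.11)?
	ok, err := kernel.GreaterEqualThan(kernel.KernelVersion{Kernel: 5, Major: 11})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("kernel >= 5.11:", ok)
}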
@@ -1,3 +1,6 @@
//go:build linux
// +build linux

/*
   Copyright The containerd Authors.

@@ -14,25 +17,26 @@
   limitations under the License.
*/

// =====
// NOTE: This file is ported from https://github.com/containerd/containerd/blob/v1.5.2/snapshots/overlay/overlayutils/check.go
// TODO: import this from containerd package once we drop support to containerd v1.4.x
// =====

package overlayutils

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"syscall"

	kernel "github.com/containerd/containerd/contrib/seccomp/kernelversion"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/mount"
	userns "github.com/containerd/containerd/sys"
	"github.com/containerd/containerd/pkg/userns"
	"github.com/containerd/continuity/fs"
)

const (
	// see https://man7.org/linux/man-pages/man2/statfs.2.html
	tmpfsMagic = 0x01021994
)

// SupportsMultipleLowerDir checks if the system supports multiple lowerdirs,
// which is required for the overlay snapshotter. On 4.x kernels, multiple lowerdirs
// are always available (so this check isn't needed), and backported to RHEL and

@@ -41,7 +45,7 @@ import (
//
// Ported from moby overlay2.
func SupportsMultipleLowerDir(d string) error {
	td, err := ioutil.TempDir(d, "multiple-lowerdir-check")
	td, err := os.MkdirTemp(d, "multiple-lowerdir-check")
	if err != nil {
		return err
	}

@@ -90,6 +94,21 @@ func Supported(root string) error {
	return SupportsMultipleLowerDir(root)
}

// IsPathOnTmpfs returns whether the path is on a tmpfs or not.
//
// It uses statfs to check if the fs type is TMPFS_MAGIC (0x01021994)
// see https://man7.org/linux/man-pages/man2/statfs.2.html
func IsPathOnTmpfs(d string) bool {
	stat := syscall.Statfs_t{}
	err := syscall.Statfs(d, &stat)
	if err != nil {
		log.L.WithError(err).Warnf("Could not retrieve statfs for %v", d)
		return false
	}

	return stat.Type == tmpfsMagic
}

// NeedsUserXAttr returns whether overlayfs should be mounted with the "userxattr" mount option.
//
// The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11.

@@ -116,10 +135,19 @@ func NeedsUserXAttr(d string) (bool, error) {
		return false, nil
	}

	// TODO: add fast path for kernel >= 5.11 .
	// userxattr not permitted on tmpfs https://man7.org/linux/man-pages/man5/tmpfs.5.html
	if IsPathOnTmpfs(d) {
		return false, nil
	}

	// Fast path on kernels >= 5.11
	//
	// Keep in mind that distro vendors might be going to backport the patch to older kernels.
	// So we can't completely remove the check.
	// Keep in mind that distro vendors might be going to backport the patch to older kernels
	// so we can't completely remove the "slow path".
	fiveDotEleven := kernel.KernelVersion{Kernel: 5, Major: 11}
	if ok, err := kernel.GreaterEqualThan(fiveDotEleven); err == nil && ok {
		return true, nil
	}

	tdRoot := filepath.Join(d, "userxattr-check")
	if err := os.RemoveAll(tdRoot); err != nil {

@@ -136,7 +164,7 @@ func NeedsUserXAttr(d string) (bool, error) {
	}
	}()

	td, err := ioutil.TempDir(tdRoot, "")
	td, err := os.MkdirTemp(tdRoot, "")
	if err != nil {
		return false, err
	}
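For reference, a hedged sketch of calling NeedsUserXAttr; the import path shown is the upstream containerd copy of this ported package, and the probed directory is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/snapshots/overlay/overlayutils"
)

func main() {
	// Probe whether overlayfs under this snapshotter root needs the
	// "userxattr" option (path is an illustrative choice).
	need, err := overlayutils.NeedsUserXAttr("/var/lib/containerd")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mount with userxattr:", need)
}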
27 vendor/github.com/containerd/nydus-snapshotter/pkg/converter/constant.go generated vendored Normal file
@@ -0,0 +1,27 @@
/*
 * Copyright (c) 2022. Nydus Developers. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

package converter

const (
	ManifestOSFeatureNydus   = "nydus.remoteimage.v1"
	MediaTypeNydusBlob       = "application/vnd.oci.image.layer.nydus.blob.v1"
	BootstrapFileNameInLayer = "image/image.boot"

	ManifestNydusCache = "containerd.io/snapshot/nydus-cache"

	LayerAnnotationFSVersion          = "containerd.io/snapshot/nydus-fs-version"
	LayerAnnotationNydusBlob          = "containerd.io/snapshot/nydus-blob"
	LayerAnnotationNydusBlobDigest    = "containerd.io/snapshot/nydus-blob-digest"
	LayerAnnotationNydusBlobSize      = "containerd.io/snapshot/nydus-blob-size"
	LayerAnnotationNydusBlobIDs       = "containerd.io/snapshot/nydus-blob-ids"
	LayerAnnotationNydusBootstrap     = "containerd.io/snapshot/nydus-bootstrap"
	LayerAnnotationNydusSourceChainID = "containerd.io/snapshot/nydus-source-chainid"

	LayerAnnotationNydusReferenceBlobIDs = "containerd.io/snapshot/nydus-reference-blob-ids"

	LayerAnnotationUncompressed = "containerd.io/uncompressed"
)
839 vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_unix.go generated vendored Normal file
@@ -0,0 +1,839 @@
//go:build !windows
// +build !windows

/*
 * Copyright (c) 2022. Nydus Developers. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

package converter

import (
	"archive/tar"
	"compress/gzip"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"
	"syscall"

	"github.com/containerd/containerd/archive"
	"github.com/containerd/containerd/archive/compression"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/images/converter"
	"github.com/containerd/containerd/labels"
	"github.com/containerd/fifo"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"

	"github.com/containerd/nydus-snapshotter/pkg/converter/tool"
	"github.com/containerd/nydus-snapshotter/pkg/errdefs"
)

const bootstrapNameInTar = "image.boot"
const blobNameInTar = "image.blob"

const envNydusBuilder = "NYDUS_BUILDER"
const envNydusWorkDir = "NYDUS_WORKDIR"

const configGCLabelKey = "containerd.io/gc.ref.content.config"

var bufPool = sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 1<<20)
		return &buffer
	},
}

func getBuilder(specifiedPath string) string {
	if specifiedPath != "" {
		return specifiedPath
	}

	builderPath := os.Getenv(envNydusBuilder)
	if builderPath != "" {
		return builderPath
	}

	return "nydus-image"
}

func ensureWorkDir(specifiedBasePath string) (string, error) {
	var baseWorkDir string

	if specifiedBasePath != "" {
		baseWorkDir = specifiedBasePath
	} else {
		baseWorkDir = os.Getenv(envNydusWorkDir)
	}
	if baseWorkDir == "" {
		baseWorkDir = os.TempDir()
	}

	if err := os.MkdirAll(baseWorkDir, 0750); err != nil {
		return "", errors.Wrapf(err, "create base directory %s", baseWorkDir)
	}

	workDirPath, err := os.MkdirTemp(baseWorkDir, "nydus-converter-")
	if err != nil {
		return "", errors.Wrap(err, "create work directory")
	}

	return workDirPath, nil
}

// Unpack an OCI formatted tar stream into a directory.
func unpackOciTar(ctx context.Context, dst string, reader io.Reader) error {
	ds, err := compression.DecompressStream(reader)
	if err != nil {
		return errors.Wrap(err, "unpack stream")
	}
	defer ds.Close()

	if _, err := archive.Apply(
		ctx,
		dst,
		ds,
		archive.WithConvertWhiteout(func(hdr *tar.Header, file string) (bool, error) {
			// Keep to extract all whiteout files.
			return true, nil
		}),
	); err != nil {
		return errors.Wrap(err, "apply with convert whiteout")
	}

	return nil
}

// Unpack a nydus formatted tar stream into a directory.
func unpackNydusTar(ctx context.Context, bootDst, blobDst string, ra content.ReaderAt) error {
	boot, err := os.OpenFile(bootDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return errors.Wrapf(err, "write to bootstrap %s", bootDst)
	}
	defer boot.Close()

	if err = unpackBootstrapFromNydusTar(ctx, ra, boot); err != nil {
		return errors.Wrap(err, "unpack bootstrap from nydus")
	}

	blob, err := os.OpenFile(blobDst, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return errors.Wrapf(err, "write to blob %s", blobDst)
	}
	defer blob.Close()

	if err = unpackBlobFromNydusTar(ctx, ra, blob); err != nil {
		return errors.Wrap(err, "unpack blob from nydus")
	}

	return nil
}

// Unpack the bootstrap from nydus formatted tar stream (blob + bootstrap).
// The nydus formatted tar stream is a tar-like structure that arranges the
// data as follows:
//
// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header`
func unpackBootstrapFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error {
	cur := ra.Size()
	reader := newSeekReader(ra)

	const headerSize = 512

	// Seek from tail to head of nydus formatted tar stream to find nydus
	// bootstrap data.
	for {
		if headerSize > cur {
			return fmt.Errorf("invalid tar format at pos %d", cur)
		}

		// Try to seek to the part of tar header.
		var err error
		cur, err = reader.Seek(cur-headerSize, io.SeekCurrent)
		if err != nil {
			return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize)
		}

		tr := tar.NewReader(reader)
		// Parse tar header.
		hdr, err := tr.Next()
		if err != nil {
			return errors.Wrap(err, "parse tar header")
		}

		if hdr.Name == bootstrapNameInTar {
			// Try to seek to the part of tar data (bootstrap_data).
			if hdr.Size > cur {
				return fmt.Errorf("invalid tar format at pos %d", cur)
			}
			bootstrapOffset := cur - hdr.Size
			_, err = reader.Seek(bootstrapOffset, io.SeekStart)
			if err != nil {
				return errors.Wrap(err, "seek to bootstrap data offset")
			}

			// Copy tar data (bootstrap_data) to provided target writer.
			if _, err := io.CopyN(target, reader, hdr.Size); err != nil {
				return errors.Wrap(err, "copy bootstrap data to reader")
			}

			return nil
		}

		if cur == hdr.Size {
			break
		}
	}

	return fmt.Errorf("can't find bootstrap in nydus tar")
}

// Unpack the blob from nydus formatted tar stream (blob + bootstrap).
// The nydus formatted tar stream is a tar-like structure that arranges the
// data as follows:
//
// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header`
func unpackBlobFromNydusTar(ctx context.Context, ra content.ReaderAt, target io.Writer) error {
	cur := ra.Size()
	reader := newSeekReader(ra)

	const headerSize = 512

	// Seek from tail to head of nydus formatted tar stream to find nydus
	// bootstrap data.
	for {
		if headerSize > cur {
			break
		}

		// Try to seek to the part of tar header.
		var err error
		cur, err = reader.Seek(cur-headerSize, io.SeekStart)
		if err != nil {
			return errors.Wrapf(err, "seek to %d for tar header", cur-headerSize)
		}

		tr := tar.NewReader(reader)
		// Parse tar header.
		hdr, err := tr.Next()
		if err != nil {
			return errors.Wrap(err, "parse tar header")
		}

		if hdr.Name == bootstrapNameInTar {
			if hdr.Size > cur {
				return fmt.Errorf("invalid tar format at pos %d", cur)
			}
			cur, err = reader.Seek(cur-hdr.Size, io.SeekStart)
			if err != nil {
				return errors.Wrap(err, "seek to bootstrap data offset")
			}
		} else if hdr.Name == blobNameInTar {
			if hdr.Size > cur {
				return fmt.Errorf("invalid tar format at pos %d", cur)
			}
			_, err = reader.Seek(cur-hdr.Size, io.SeekStart)
			if err != nil {
				return errors.Wrap(err, "seek to blob data offset")
			}
			if _, err := io.CopyN(target, reader, hdr.Size); err != nil {
				return errors.Wrap(err, "copy blob data to reader")
			}
			return nil
		}
	}

	return nil
}

// Pack converts an OCI tar stream to nydus formatted stream with a tar-like
// structure that arranges the data as follows:
//
// `blob_data | blob_tar_header | bootstrap_data | bootstrap_tar_header`
//
// The caller should write OCI tar stream into the returned `io.WriteCloser`,
// then the Pack method will write the nydus formatted stream to `dest`
// provided by the caller.
//
// Important: the caller must check `io.WriteCloser.Close() == nil` to ensure
// the conversion workflow is finished.
func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) {
	workDir, err := ensureWorkDir(opt.WorkDir)
	if err != nil {
		return nil, errors.Wrap(err, "ensure work directory")
	}
	defer func() {
		if err != nil {
			os.RemoveAll(workDir)
		}
	}()

	sourceDir := filepath.Join(workDir, "source")
	if err := os.MkdirAll(sourceDir, 0755); err != nil {
		return nil, errors.Wrap(err, "create source directory")
	}

	pr, pw := io.Pipe()

	unpackDone := make(chan bool, 1)
	go func() {
		if err := unpackOciTar(ctx, sourceDir, pr); err != nil {
			pr.CloseWithError(errors.Wrapf(err, "unpack to %s", sourceDir))
			close(unpackDone)
			return
		}
		unpackDone <- true
	}()

	wc := newWriteCloser(pw, func() error {
		defer func() {
			os.RemoveAll(workDir)
		}()

		// PipeWriter#Close being called does not mean that the PipeReader
		// has finished reading all the data, and unpack may not be complete yet,
		// so we need to wait for that here.
		<-unpackDone

		blobPath := filepath.Join(workDir, "blob")
		blobFifo, err := fifo.OpenFifo(ctx, blobPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644)
		if err != nil {
			return errors.Wrapf(err, "create fifo file")
		}
		defer blobFifo.Close()

		go func() {
			err := tool.Pack(tool.PackOption{
				BuilderPath: getBuilder(opt.BuilderPath),

				BlobPath:         blobPath,
				FsVersion:        opt.FsVersion,
				SourcePath:       sourceDir,
				ChunkDictPath:    opt.ChunkDictPath,
				PrefetchPatterns: opt.PrefetchPatterns,
				Compressor:       opt.Compressor,
				Timeout:          opt.Timeout,
			})
			if err != nil {
				pw.CloseWithError(errors.Wrapf(err, "convert blob for %s", sourceDir))
				blobFifo.Close()
			}
		}()

		buffer := bufPool.Get().(*[]byte)
		defer bufPool.Put(buffer)
		if _, err := io.CopyBuffer(dest, blobFifo, *buffer); err != nil {
			return errors.Wrap(err, "pack nydus tar")
		}

		return nil
	})

	return wc, nil
}
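A usage sketch of the calling pattern the Pack doc comment prescribes: stream an OCI tar in, then treat a non-nil Close as a failed conversion. File names are illustrative, and an empty PackOption relies on the NYDUS_BUILDER/"nydus-image" defaults above:

package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/containerd/nydus-snapshotter/pkg/converter"
)

func main() {
	src, err := os.Open("layer.tar") // an uncompressed OCI layer tar (illustrative path)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.Create("layer.nydus.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	wc, err := converter.Pack(context.Background(), dst, converter.PackOption{})
	if err != nil {
		log.Fatal(err)
	}
	if _, err := io.Copy(wc, src); err != nil {
		log.Fatal(err)
	}
	// Per the doc comment: Close must return nil for the conversion to
	// be considered finished.
	if err := wc.Close(); err != nil {
		log.Fatal(err)
	}
}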
// Merge merges multiple nydus bootstraps (one from each layer of the image)
// into a final bootstrap. Because enabling the `ChunkDictPath` option may
// deduplicate data, it returns the actual blob digests referenced by the
// final bootstrap.
func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) ([]digest.Digest, error) {
	workDir, err := ensureWorkDir(opt.WorkDir)
	if err != nil {
		return nil, errors.Wrap(err, "ensure work directory")
	}
	defer os.RemoveAll(workDir)

	eg, ctx := errgroup.WithContext(ctx)
	sourceBootstrapPaths := []string{}
	for idx := range layers {
		sourceBootstrapPaths = append(sourceBootstrapPaths, filepath.Join(workDir, layers[idx].Digest.Hex()))
		eg.Go(func(idx int) func() error {
			return func() error {
				layer := layers[idx]

				// Use the hex hash string of whole tar blob as the bootstrap name.
				bootstrap, err := os.Create(filepath.Join(workDir, layer.Digest.Hex()))
				if err != nil {
					return errors.Wrap(err, "create source bootstrap")
				}
				defer bootstrap.Close()

				if err := unpackBootstrapFromNydusTar(ctx, layer.ReaderAt, bootstrap); err != nil {
					return errors.Wrap(err, "unpack nydus tar")
				}

				return nil
			}
		}(idx))
	}

	if err := eg.Wait(); err != nil {
		return nil, errors.Wrap(err, "unpack all bootstraps")
	}

	targetBootstrapPath := filepath.Join(workDir, "bootstrap")

	blobDigests, err := tool.Merge(tool.MergeOption{
		BuilderPath: getBuilder(opt.BuilderPath),

		SourceBootstrapPaths: sourceBootstrapPaths,
		TargetBootstrapPath:  targetBootstrapPath,
		ChunkDictPath:        opt.ChunkDictPath,
		PrefetchPatterns:     opt.PrefetchPatterns,
		OutputJSONPath:       filepath.Join(workDir, "merge-output.json"),
		Timeout:              opt.Timeout,
	})
	if err != nil {
		return nil, errors.Wrap(err, "merge bootstrap")
	}

	var rc io.ReadCloser

	if opt.WithTar {
		rc, err = packToTar(targetBootstrapPath, fmt.Sprintf("image/%s", bootstrapNameInTar), false)
		if err != nil {
			return nil, errors.Wrap(err, "pack bootstrap to tar")
		}
	} else {
		rc, err = os.Open(targetBootstrapPath)
		if err != nil {
			return nil, errors.Wrap(err, "open target bootstrap")
		}
	}
	defer rc.Close()

	buffer := bufPool.Get().(*[]byte)
	defer bufPool.Put(buffer)
	if _, err = io.CopyBuffer(dest, rc, *buffer); err != nil {
		return nil, errors.Wrap(err, "copy merged bootstrap")
	}

	return blobDigests, nil
}

// Unpack converts a nydus blob layer to an OCI formatted tar stream.
func Unpack(ctx context.Context, ra content.ReaderAt, dest io.Writer, opt UnpackOption) error {
	workDir, err := ensureWorkDir(opt.WorkDir)
	if err != nil {
		return errors.Wrap(err, "ensure work directory")
	}
	defer os.RemoveAll(workDir)

	bootPath, blobPath := filepath.Join(workDir, bootstrapNameInTar), filepath.Join(workDir, blobNameInTar)
	if err = unpackNydusTar(ctx, bootPath, blobPath, ra); err != nil {
		return errors.Wrap(err, "unpack nydus tar")
	}

	tarPath := filepath.Join(workDir, "oci.tar")
	blobFifo, err := fifo.OpenFifo(ctx, tarPath, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_NONBLOCK, 0644)
	if err != nil {
		return errors.Wrapf(err, "create fifo file")
	}
	defer blobFifo.Close()

	unpackErrChan := make(chan error)
	go func() {
		defer close(unpackErrChan)
		err := tool.Unpack(tool.UnpackOption{
			BuilderPath:   getBuilder(opt.BuilderPath),
			BootstrapPath: bootPath,
			BlobPath:      blobPath,
			TarPath:       tarPath,
			Timeout:       opt.Timeout,
		})
		if err != nil {
			blobFifo.Close()
			unpackErrChan <- err
		}
	}()

	buffer := bufPool.Get().(*[]byte)
	defer bufPool.Put(buffer)
	if _, err := io.CopyBuffer(dest, blobFifo, *buffer); err != nil {
		if unpackErr := <-unpackErrChan; unpackErr != nil {
			return errors.Wrap(unpackErr, "unpack")
		}
		return errors.Wrap(err, "copy oci tar")
	}

	return nil
}
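A sketch of round-tripping the blob back with Unpack; the fileReaderAt adapter is a hypothetical shim that adds the Size method containerd's content.ReaderAt requires:

package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/nydus-snapshotter/pkg/converter"
)

// fileReaderAt adapts an *os.File (which has ReadAt and Close) to
// containerd's content.ReaderAt, which additionally requires Size().
type fileReaderAt struct {
	*os.File
	size int64
}

func (r fileReaderAt) Size() int64 { return r.size }

func main() {
	f, err := os.Open("layer.nydus.tar") // output of the Pack sketch above
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create("layer.oci.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Round-trip the nydus blob back to an OCI tar stream.
	if err := converter.Unpack(context.Background(), fileReaderAt{f, fi.Size()}, out, converter.UnpackOption{}); err != nil {
		log.Fatal(err)
	}
}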
// IsNydusBlobAndExists returns true when the specified digest of content exists in
// the content store and it's nydus blob format.
func IsNydusBlobAndExists(ctx context.Context, cs content.Store, desc ocispec.Descriptor) bool {
	_, err := cs.Info(ctx, desc.Digest)
	if err != nil {
		return false
	}

	return IsNydusBlob(ctx, desc)
}

// IsNydusBlob returns true when the specified descriptor is nydus blob format.
func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool {
	if desc.Annotations == nil {
		return false
	}

	_, hasAnno := desc.Annotations[LayerAnnotationNydusBlob]
	return hasAnno
}

// LayerConvertFunc returns a function which converts an OCI image layer to
// a nydus blob layer, and sets the media type to "application/vnd.oci.image.layer.nydus.blob.v1".
func LayerConvertFunc(opt PackOption) converter.ConvertFunc {
	return func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
		if !images.IsLayerType(desc.MediaType) {
			return nil, nil
		}

		ra, err := cs.ReaderAt(ctx, desc)
		if err != nil {
			return nil, errors.Wrap(err, "get source blob reader")
		}
		defer ra.Close()
		rdr := io.NewSectionReader(ra, 0, ra.Size())

		ref := fmt.Sprintf("convert-nydus-from-%s", desc.Digest)
		dst, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
		if err != nil {
			return nil, errors.Wrap(err, "open blob writer")
		}
		defer dst.Close()

		tr, err := compression.DecompressStream(rdr)
		if err != nil {
			return nil, errors.Wrap(err, "decompress blob stream")
		}

		digester := digest.SHA256.Digester()
		pr, pw := io.Pipe()
		tw, err := Pack(ctx, io.MultiWriter(pw, digester.Hash()), opt)
		if err != nil {
			return nil, errors.Wrap(err, "pack tar to nydus")
		}

		go func() {
			defer pw.Close()
			buffer := bufPool.Get().(*[]byte)
			defer bufPool.Put(buffer)
			if _, err := io.CopyBuffer(tw, tr, *buffer); err != nil {
				pw.CloseWithError(err)
				return
			}
			if err := tr.Close(); err != nil {
				pw.CloseWithError(err)
				return
			}
			if err := tw.Close(); err != nil {
				pw.CloseWithError(err)
				return
			}
		}()

		if err := content.Copy(ctx, dst, pr, 0, ""); err != nil {
			return nil, errors.Wrap(err, "copy nydus blob to content store")
		}

		blobDigest := digester.Digest()
		info, err := cs.Info(ctx, blobDigest)
		if err != nil {
			return nil, errors.Wrapf(err, "get blob info %s", blobDigest)
		}
		if info.Labels == nil {
			info.Labels = map[string]string{}
		}
		// Write a diff id label of layer in content store for simplifying
		// diff id calculation to speed up the conversion.
		// See: https://github.com/containerd/containerd/blob/e4fefea5544d259177abb85b64e428702ac49c97/images/diffid.go#L49
		info.Labels[labels.LabelUncompressed] = blobDigest.String()
		_, err = cs.Update(ctx, info)
		if err != nil {
			return nil, errors.Wrap(err, "update layer label")
		}

		newDesc := ocispec.Descriptor{
			Digest:    blobDigest,
			Size:      info.Size,
			MediaType: MediaTypeNydusBlob,
			Annotations: map[string]string{
				// Use `containerd.io/uncompressed` to generate DiffID of
				// layer defined in OCI spec.
				LayerAnnotationUncompressed: blobDigest.String(),
				LayerAnnotationNydusBlob:    "true",
			},
		}

		if opt.Backend != nil {
			blobRa, err := cs.ReaderAt(ctx, newDesc)
			if err != nil {
				return nil, errors.Wrap(err, "get nydus blob reader")
			}
			defer blobRa.Close()

			if err := opt.Backend.Push(ctx, blobRa, blobDigest); err != nil {
				return nil, errors.Wrap(err, "push to storage backend")
			}
		}

		return &newDesc, nil
	}
}
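A hedged sketch of wiring LayerConvertFunc into containerd's image converter; the socket path and image references are illustrative:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	containerdconverter "github.com/containerd/containerd/images/converter"
	nydusconverter "github.com/containerd/nydus-snapshotter/pkg/converter"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Rewrite each OCI layer as a nydus blob while copying the image
	// under a new reference.
	_, err = containerdconverter.Convert(
		context.Background(),
		client,
		"docker.io/library/nginx:latest-nydus", // illustrative target ref
		"docker.io/library/nginx:latest",       // illustrative source ref
		containerdconverter.WithLayerConvertFunc(nydusconverter.LayerConvertFunc(nydusconverter.PackOption{})),
	)
	if err != nil {
		log.Fatal(err)
	}
}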
// ConvertHookFunc returns a function which will be used as a callback
// called for each blob after conversion is done. The function only hooks
// the index conversion and the manifest conversion.
func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc {
	return func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) {
		switch {
		case images.IsIndexType(newDesc.MediaType):
			return convertIndex(ctx, cs, orgDesc, newDesc)
		case images.IsManifestType(newDesc.MediaType):
			return convertManifest(ctx, cs, newDesc, opt)
		default:
			return newDesc, nil
		}
	}
}

// convertIndex modifies the original index by appending "nydus.remoteimage.v1"
// to the Platform.OSFeatures of each modified manifest descriptor.
func convertIndex(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) {
	var orgIndex ocispec.Index
	if _, err := readJSON(ctx, cs, &orgIndex, orgDesc); err != nil {
		return nil, errors.Wrap(err, "read target image index json")
	}
	// isManifestModified is a function to check whether the manifest is modified.
	isManifestModified := func(manifest ocispec.Descriptor) bool {
		for _, oldManifest := range orgIndex.Manifests {
			if manifest.Digest == oldManifest.Digest {
				return false
			}
		}
		return true
	}

	var index ocispec.Index
	indexLabels, err := readJSON(ctx, cs, &index, *newDesc)
	if err != nil {
		return nil, errors.Wrap(err, "read index json")
	}
	for i, manifest := range index.Manifests {
		if !isManifestModified(manifest) {
			// Skip the manifest which is not modified.
			continue
		}
		manifest.Platform.OSFeatures = append(manifest.Platform.OSFeatures, ManifestOSFeatureNydus)
		index.Manifests[i] = manifest
	}
	// Update image index in content store.
	newIndexDesc, err := writeJSON(ctx, cs, index, *newDesc, indexLabels)
	if err != nil {
		return nil, errors.Wrap(err, "write index json")
	}
	return newIndexDesc, nil
}

// convertManifest merges all the nydus blob layers into a
// nydus bootstrap layer, updates the image config,
// and modifies the image manifest.
func convertManifest(ctx context.Context, cs content.Store, newDesc *ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) {
	var manifest ocispec.Manifest
	manifestDesc := *newDesc
	manifestLabels, err := readJSON(ctx, cs, &manifest, manifestDesc)
	if err != nil {
		return nil, errors.Wrap(err, "read manifest json")
	}

	// Append bootstrap layer to manifest.
	bootstrapDesc, blobDescs, err := MergeLayers(ctx, cs, manifest.Layers, MergeOption{
		BuilderPath:   opt.BuilderPath,
		WorkDir:       opt.WorkDir,
		ChunkDictPath: opt.ChunkDictPath,
		FsVersion:     opt.FsVersion,
		WithTar:       true,
	})
	if err != nil {
		return nil, errors.Wrap(err, "merge nydus layers")
	}
	if opt.Backend != nil {
		// Only append nydus bootstrap layer into manifest, and do not put nydus
		// blob layer into manifest if blob storage backend is specified.
		manifest.Layers = []ocispec.Descriptor{*bootstrapDesc}
	} else {
		for idx, blobDesc := range blobDescs {
			blobGCLabelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", idx)
			manifestLabels[blobGCLabelKey] = blobDesc.Digest.String()
		}
		// Affected by the chunk dict, the blob list referenced by the final
		// bootstrap comes from different layers: part from the original layers,
		// part from the chunk dict bootstrap, so we need to rewrite the
		// manifest's layers here.
		manifest.Layers = append(blobDescs, *bootstrapDesc)
	}

	// Update the gc label of bootstrap layer
	bootstrapGCLabelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", len(manifest.Layers)-1)
	manifestLabels[bootstrapGCLabelKey] = bootstrapDesc.Digest.String()

	// Rewrite diff ids and remove useless annotation.
	var config ocispec.Image
	configLabels, err := readJSON(ctx, cs, &config, manifest.Config)
	if err != nil {
		return nil, errors.Wrap(err, "read image config")
	}
	if opt.Backend != nil {
		config.RootFS.DiffIDs = []digest.Digest{digest.Digest(bootstrapDesc.Annotations[LayerAnnotationUncompressed])}
	} else {
		config.RootFS.DiffIDs = make([]digest.Digest, 0, len(manifest.Layers))
		for i, layer := range manifest.Layers {
			config.RootFS.DiffIDs = append(config.RootFS.DiffIDs, digest.Digest(layer.Annotations[LayerAnnotationUncompressed]))
			// Remove useless annotation.
			delete(manifest.Layers[i].Annotations, LayerAnnotationUncompressed)
		}
	}
	// Update image config in content store.
	newConfigDesc, err := writeJSON(ctx, cs, config, manifest.Config, configLabels)
	if err != nil {
		return nil, errors.Wrap(err, "write image config")
	}
	manifest.Config = *newConfigDesc
	// Update the config gc label
	manifestLabels[configGCLabelKey] = newConfigDesc.Digest.String()

	// Update image manifest in content store.
	newManifestDesc, err := writeJSON(ctx, cs, manifest, manifestDesc, manifestLabels)
	if err != nil {
		return nil, errors.Wrap(err, "write manifest")
	}

	return newManifestDesc, nil
}

// MergeLayers merges a list of nydus blob layers into a nydus bootstrap layer.
// The media type of the nydus bootstrap layer is "application/vnd.oci.image.layer.v1.tar+gzip".
func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, []ocispec.Descriptor, error) {
	// Extract the nydus bootstrap from the nydus format for each layer.
	layers := []Layer{}

	var chainID digest.Digest
	for _, blobDesc := range descs {
		ra, err := cs.ReaderAt(ctx, blobDesc)
		if err != nil {
			return nil, nil, errors.Wrapf(err, "get reader for blob %q", blobDesc.Digest)
		}
		defer ra.Close()
		layers = append(layers, Layer{
			Digest:   blobDesc.Digest,
			ReaderAt: ra,
		})
		if chainID == "" {
			chainID = identity.ChainID([]digest.Digest{blobDesc.Digest})
		} else {
			chainID = identity.ChainID([]digest.Digest{chainID, blobDesc.Digest})
		}
	}

	// Merge all nydus bootstraps into a final nydus bootstrap.
	pr, pw := io.Pipe()
	blobDigestChan := make(chan []digest.Digest, 1)
	go func() {
		defer pw.Close()
		blobDigests, err := Merge(ctx, layers, pw, opt)
		if err != nil {
			pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap"))
		}
		blobDigestChan <- blobDigests
	}()

	// Compress final nydus bootstrap to tar.gz and write into content store.
	cw, err := content.OpenWriter(ctx, cs, content.WithRef("nydus-merge-"+chainID.String()))
	if err != nil {
		return nil, nil, errors.Wrap(err, "open content store writer")
	}
	defer cw.Close()

	gw := gzip.NewWriter(cw)
	uncompressedDgst := digest.SHA256.Digester()
	compressed := io.MultiWriter(gw, uncompressedDgst.Hash())
	buffer := bufPool.Get().(*[]byte)
	defer bufPool.Put(buffer)
	if _, err := io.CopyBuffer(compressed, pr, *buffer); err != nil {
		return nil, nil, errors.Wrapf(err, "copy bootstrap targz into content store")
	}
	if err := gw.Close(); err != nil {
		return nil, nil, errors.Wrap(err, "close gzip writer")
	}

	compressedDgst := cw.Digest()
	if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{
		LayerAnnotationUncompressed: uncompressedDgst.Digest().String(),
	})); err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return nil, nil, errors.Wrap(err, "commit to content store")
		}
	}
	if err := cw.Close(); err != nil {
		return nil, nil, errors.Wrap(err, "close content store writer")
	}

	bootstrapInfo, err := cs.Info(ctx, compressedDgst)
	if err != nil {
		return nil, nil, errors.Wrap(err, "get info from content store")
	}

	blobDigests := <-blobDigestChan
	blobDescs := []ocispec.Descriptor{}
	blobIDs := []string{}
	for _, blobDigest := range blobDigests {
		blobInfo, err := cs.Info(ctx, blobDigest)
		if err != nil {
			return nil, nil, errors.Wrap(err, "get info from content store")
		}
		blobDesc := ocispec.Descriptor{
			Digest:    blobDigest,
			Size:      blobInfo.Size,
			MediaType: MediaTypeNydusBlob,
			Annotations: map[string]string{
				LayerAnnotationUncompressed: blobDigest.String(),
				LayerAnnotationNydusBlob:    "true",
			},
		}
		blobDescs = append(blobDescs, blobDesc)
		blobIDs = append(blobIDs, blobDigest.Hex())
	}

	blobIDsBytes, err := json.Marshal(blobIDs)
	if err != nil {
		return nil, nil, errors.Wrap(err, "marshal blob ids")
	}

	if opt.FsVersion == "" {
		opt.FsVersion = "5"
	}

	bootstrapDesc := ocispec.Descriptor{
		Digest:    compressedDgst,
		Size:      bootstrapInfo.Size,
		MediaType: ocispec.MediaTypeImageLayerGzip,
		Annotations: map[string]string{
			LayerAnnotationUncompressed: uncompressedDgst.Digest().String(),
			LayerAnnotationFSVersion:    opt.FsVersion,
			// Use this annotation to identify nydus bootstrap layer.
			LayerAnnotationNydusBootstrap: "true",
			// Track all blob digests for nydus snapshotter.
			LayerAnnotationNydusBlobIDs: string(blobIDsBytes),
		},
	}

	return &bootstrapDesc, blobDescs, nil
}
51 vendor/github.com/containerd/nydus-snapshotter/pkg/converter/convert_windows.go generated vendored Normal file
@@ -0,0 +1,51 @@
//go:build windows
// +build windows

/*
 * Copyright (c) 2022. Nydus Developers. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

package converter

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images/converter"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func Pack(ctx context.Context, dest io.Writer, opt PackOption) (io.WriteCloser, error) {
	panic("not implemented")
}

func Merge(ctx context.Context, layers []Layer, dest io.Writer, opt MergeOption) error {
	panic("not implemented")
}

func Unpack(ctx context.Context, ia content.ReaderAt, dest io.Writer, opt UnpackOption) error {
	panic("not implemented")
}

func IsNydusBlobAndExists(ctx context.Context, cs content.Store, desc ocispec.Descriptor) bool {
	panic("not implemented")
}

func IsNydusBlob(ctx context.Context, desc ocispec.Descriptor) bool {
	panic("not implemented")
}

func LayerConvertFunc(opt PackOption) converter.ConvertFunc {
	panic("not implemented")
}

func ConvertHookFunc(opt MergeOption) converter.ConvertHookFunc {
	panic("not implemented")
}

func MergeLayers(ctx context.Context, cs content.Store, descs []ocispec.Descriptor, opt MergeOption) (*ocispec.Descriptor, error) {
	panic("not implemented")
}
217 vendor/github.com/containerd/nydus-snapshotter/pkg/converter/tool/builder.go generated vendored Normal file
@@ -0,0 +1,217 @@
/*
 * Copyright (c) 2022. Nydus Developers. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

package tool

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os/exec"
	"strings"
	"time"

	"github.com/containerd/nydus-snapshotter/pkg/errdefs"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

var logger = logrus.WithField("module", "builder")

type PackOption struct {
	BuilderPath string

	BootstrapPath    string
	BlobPath         string
	FsVersion        string
	SourcePath       string
	ChunkDictPath    string
	PrefetchPatterns string
	Compressor       string
	Timeout          *time.Duration
}

type MergeOption struct {
	BuilderPath string

	SourceBootstrapPaths []string
	TargetBootstrapPath  string
	ChunkDictPath        string
	PrefetchPatterns     string
	OutputJSONPath       string
	Timeout              *time.Duration
}

type UnpackOption struct {
	BuilderPath   string
	BootstrapPath string
	BlobPath      string
	TarPath       string
	Timeout       *time.Duration
}

type outputJSON struct {
	Blobs []string
}

func Pack(option PackOption) error {
	if option.FsVersion == "" {
		option.FsVersion = "5"
	}

	args := []string{
		"create",
		"--log-level",
		"warn",
		"--prefetch-policy",
		"fs",
		"--blob",
		option.BlobPath,
		"--source-type",
		"directory",
		"--whiteout-spec",
		"none",
		"--fs-version",
		option.FsVersion,
		"--inline-bootstrap",
	}
	if option.ChunkDictPath != "" {
		args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath))
	}
	if option.PrefetchPatterns == "" {
		option.PrefetchPatterns = "/"
	}
	if option.Compressor != "" {
		args = append(args, "--compressor", option.Compressor)
	}
	args = append(args, option.SourcePath)

	ctx := context.Background()
	var cancel context.CancelFunc
	if option.Timeout != nil {
		ctx, cancel = context.WithTimeout(ctx, *option.Timeout)
		defer cancel()
	}

	logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " "))

	cmd := exec.CommandContext(ctx, option.BuilderPath, args...)
	cmd.Stdout = logger.Writer()
	cmd.Stderr = logger.Writer()
	cmd.Stdin = strings.NewReader(option.PrefetchPatterns)

	if err := cmd.Run(); err != nil {
		if errdefs.IsSignalKilled(err) && option.Timeout != nil {
			logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout)
		} else {
			logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args)
		}
		return err
	}

	return nil
}
|
||||
func Merge(option MergeOption) ([]digest.Digest, error) {
|
||||
args := []string{
|
||||
"merge",
|
||||
"--log-level",
|
||||
"warn",
|
||||
"--prefetch-policy",
|
||||
"fs",
|
||||
"--output-json",
|
||||
option.OutputJSONPath,
|
||||
"--bootstrap",
|
||||
option.TargetBootstrapPath,
|
||||
}
|
||||
if option.ChunkDictPath != "" {
|
||||
args = append(args, "--chunk-dict", fmt.Sprintf("bootstrap=%s", option.ChunkDictPath))
|
||||
}
|
||||
if option.PrefetchPatterns == "" {
|
||||
option.PrefetchPatterns = "/"
|
||||
}
|
||||
args = append(args, option.SourceBootstrapPaths...)
|
||||
|
||||
ctx := context.Background()
|
||||
var cancel context.CancelFunc
|
||||
if option.Timeout != nil {
|
||||
ctx, cancel = context.WithTimeout(ctx, *option.Timeout)
|
||||
defer cancel()
|
||||
}
|
||||
logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " "))
|
||||
|
||||
cmd := exec.CommandContext(ctx, option.BuilderPath, args...)
|
||||
cmd.Stdout = logger.Writer()
|
||||
cmd.Stderr = logger.Writer()
|
||||
cmd.Stdin = strings.NewReader(option.PrefetchPatterns)
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
if errdefs.IsSignalKilled(err) && option.Timeout != nil {
|
||||
logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout)
|
||||
} else {
|
||||
logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args)
|
||||
}
|
||||
return nil, errors.Wrap(err, "run merge command")
|
||||
}
|
||||
|
||||
outputBytes, err := ioutil.ReadFile(option.OutputJSONPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "read file %s", option.OutputJSONPath)
|
||||
}
|
||||
var output outputJSON
|
||||
err = json.Unmarshal(outputBytes, &output)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unmarshal output json file %s", option.OutputJSONPath)
|
||||
}
|
||||
|
||||
blobDigests := []digest.Digest{}
|
||||
for _, blobID := range output.Blobs {
|
||||
blobDigests = append(blobDigests, digest.NewDigestFromHex(string(digest.SHA256), blobID))
|
||||
}
|
||||
|
||||
return blobDigests, nil
|
||||
}
|
||||
|
||||
func Unpack(option UnpackOption) error {
|
||||
args := []string{
|
||||
"unpack",
|
||||
"--log-level",
|
||||
"warn",
|
||||
"--bootstrap",
|
||||
option.BootstrapPath,
|
||||
"--output",
|
||||
option.TarPath,
|
||||
}
|
||||
if option.BlobPath != "" {
|
||||
args = append(args, "--blob", option.BlobPath)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
var cancel context.CancelFunc
|
||||
if option.Timeout != nil {
|
||||
ctx, cancel = context.WithTimeout(ctx, *option.Timeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
logrus.Debugf("\tCommand: %s %s", option.BuilderPath, strings.Join(args[:], " "))
|
||||
|
||||
cmd := exec.CommandContext(ctx, option.BuilderPath, args...)
|
||||
cmd.Stdout = logger.Writer()
|
||||
cmd.Stderr = logger.Writer()
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
if errdefs.IsSignalKilled(err) && option.Timeout != nil {
|
||||
logrus.WithError(err).Errorf("fail to run %v %+v, possibly due to timeout %v", option.BuilderPath, args, *option.Timeout)
|
||||
} else {
|
||||
logrus.WithError(err).Errorf("fail to run %v %+v", option.BuilderPath, args)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
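The functions above shell out to the `nydus-image` binary, optionally under a context timeout. A minimal sketch of driving Pack — all paths are hypothetical and nydus-image must be installed:

```go
package main

import (
	"time"

	"github.com/containerd/nydus-snapshotter/pkg/converter/tool"
)

func main() {
	timeout := 5 * time.Minute
	// Hypothetical paths; Pack defaults PrefetchPatterns to "/" and
	// FsVersion to "5" when unset.
	err := tool.Pack(tool.PackOption{
		BuilderPath: "/usr/local/bin/nydus-image",
		BlobPath:    "/tmp/layer.blob",
		SourcePath:  "/tmp/layer-unpacked",
		FsVersion:   "6",
		Timeout:     &timeout,
	})
	if err != nil {
		panic(err)
	}
}
```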
vendor/github.com/containerd/nydus-snapshotter/pkg/converter/types.go (84 lines, generated, vendored, new file)
@@ -0,0 +1,84 @@
/*
 * Copyright (c) 2022. Nydus Developers. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

package converter

import (
	"context"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
)

type Layer struct {
	// Digest represents the hash of whole tar blob.
	Digest digest.Digest
	// ReaderAt holds the reader of whole tar blob.
	ReaderAt content.ReaderAt
}

// Backend uploads blobs generated by nydus-image builder to a backend storage such as:
// - oss: A object storage backend, which uses its SDK to upload blob file.
type Backend interface {
	// Push pushes specified blob file to remote storage backend.
	Push(ctx context.Context, ra content.ReaderAt, blobDigest digest.Digest) error
	// Check checks whether a blob exists in remote storage backend,
	// blob exists -> return (blobPath, nil)
	// blob not exists -> return ("", err)
	Check(blobDigest digest.Digest) (string, error)
	// Type returns backend type name.
	Type() string
}

type PackOption struct {
	// WorkDir is used as the work directory during layer pack.
	WorkDir string
	// BuilderPath holds the path of `nydus-image` binary tool.
	BuilderPath string
	// FsVersion specifies nydus RAFS format version, possible
	// values: `5`, `6` (EROFS-compatible), default is `5`.
	FsVersion string
	// ChunkDictPath holds the bootstrap path of chunk dict image.
	ChunkDictPath string
	// PrefetchPatterns holds file path pattern list want to prefetch.
	PrefetchPatterns string
	// Compressor specifies nydus blob compression algorithm.
	Compressor string
	// Backend uploads blobs generated by nydus-image builder to a backend storage.
	Backend Backend
	// Timeout cancels execution once exceed the specified time.
	Timeout *time.Duration
}

type MergeOption struct {
	// WorkDir is used as the work directory during layer merge.
	WorkDir string
	// BuilderPath holds the path of `nydus-image` binary tool.
	BuilderPath string
	// FsVersion specifies nydus RAFS format version, possible
	// values: `5`, `6` (EROFS-compatible), default is `5`.
	FsVersion string
	// ChunkDictPath holds the bootstrap path of chunk dict image.
	ChunkDictPath string
	// PrefetchPatterns holds file path pattern list want to prefetch.
	PrefetchPatterns string
	// WithTar puts bootstrap into a tar stream (no gzip).
	WithTar bool
	// Backend uploads blobs generated by nydus-image builder to a backend storage.
	Backend Backend
	// Timeout cancels execution once exceed the specified time.
	Timeout *time.Duration
}

type UnpackOption struct {
	// WorkDir is used as the work directory during layer unpack.
	WorkDir string
	// BuilderPath holds the path of `nydus-image` binary tool.
	BuilderPath string
	// Timeout cancels execution once exceed the specified time.
	Timeout *time.Duration
}
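The Backend interface is the extension point for pushing nydus blobs somewhere other than the content store. A minimal sketch of a conforming implementation — the local-directory behaviour is hypothetical, not one of the vendored backends:

```go
package converter

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
)

// dirBackend copies blobs into a local directory; purely illustrative.
type dirBackend struct{ root string }

func (b dirBackend) Push(ctx context.Context, ra content.ReaderAt, blobDigest digest.Digest) error {
	f, err := os.Create(filepath.Join(b.root, blobDigest.Encoded()))
	if err != nil {
		return err
	}
	defer f.Close()
	// Stream the whole blob out of the content store.
	_, err = io.Copy(f, io.NewSectionReader(ra, 0, ra.Size()))
	return err
}

func (b dirBackend) Check(blobDigest digest.Digest) (string, error) {
	p := filepath.Join(b.root, blobDigest.Encoded())
	if _, err := os.Stat(p); err != nil {
		return "", fmt.Errorf("blob not found: %w", err)
	}
	return p, nil
}

func (b dirBackend) Type() string { return "dir" }
```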
vendor/github.com/containerd/nydus-snapshotter/pkg/converter/utils.go (204 lines, generated, vendored, new file)
@@ -0,0 +1,204 @@
/*
 * Copyright (c) 2022. Nydus Developers. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

package converter

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

type writeCloser struct {
	closed bool
	io.WriteCloser
	action func() error
}

func (c *writeCloser) Close() error {
	if c.closed {
		return nil
	}

	if err := c.WriteCloser.Close(); err != nil {
		return err
	}
	c.closed = true

	if err := c.action(); err != nil {
		return err
	}

	return nil
}

func newWriteCloser(wc io.WriteCloser, action func() error) *writeCloser {
	return &writeCloser{
		WriteCloser: wc,
		action:      action,
	}
}

type seekReader struct {
	io.ReaderAt
	pos int64
}

func (ra *seekReader) Read(p []byte) (int, error) {
	n, err := ra.ReaderAt.ReadAt(p, ra.pos)
	ra.pos += int64(len(p))
	return n, err
}

func (ra *seekReader) Seek(offset int64, whence int) (int64, error) {
	if whence == io.SeekCurrent {
		ra.pos += offset
	} else if whence == io.SeekStart {
		ra.pos = offset
	} else {
		return 0, fmt.Errorf("unsupported whence %d", whence)
	}
	return ra.pos, nil
}

func newSeekReader(ra io.ReaderAt) *seekReader {
	return &seekReader{
		ReaderAt: ra,
		pos:      0,
	}
}

// packToTar makes .tar(.gz) stream of file named `name` and return reader.
func packToTar(src string, name string, compress bool) (io.ReadCloser, error) {
	fi, err := os.Stat(src)
	if err != nil {
		return nil, err
	}

	dirHdr := &tar.Header{
		Name:     filepath.Dir(name),
		Mode:     0755,
		Typeflag: tar.TypeDir,
	}

	hdr := &tar.Header{
		Name: name,
		Mode: 0444,
		Size: fi.Size(),
	}

	reader, writer := io.Pipe()

	go func() {
		// Prepare targz writer
		var tw *tar.Writer
		var gw *gzip.Writer
		var err error
		var file *os.File

		if compress {
			gw = gzip.NewWriter(writer)
			tw = tar.NewWriter(gw)
		} else {
			tw = tar.NewWriter(writer)
		}

		defer func() {
			err1 := tw.Close()
			var err2 error
			if gw != nil {
				err2 = gw.Close()
			}

			var finalErr error

			// Return the first error encountered to the other end and ignore others.
			if err != nil {
				finalErr = err
			} else if err1 != nil {
				finalErr = err1
			} else if err2 != nil {
				finalErr = err2
			}

			writer.CloseWithError(finalErr)
		}()

		file, err = os.Open(src)
		if err != nil {
			return
		}
		defer file.Close()

		// Write targz stream
		if err = tw.WriteHeader(dirHdr); err != nil {
			return
		}

		if err = tw.WriteHeader(hdr); err != nil {
			return
		}

		if _, err = io.Copy(tw, file); err != nil {
			return
		}
	}()

	return reader, nil
}

// Copied from containerd/containerd project, copyright The containerd Authors.
// https://github.com/containerd/containerd/blob/4902059cb554f4f06a8d06a12134c17117809f4e/images/converter/default.go#L385
func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec.Descriptor) (map[string]string, error) {
	info, err := cs.Info(ctx, desc.Digest)
	if err != nil {
		return nil, err
	}
	labels := info.Labels
	b, err := content.ReadBlob(ctx, cs, desc)
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(b, x); err != nil {
		return nil, err
	}
	return labels, nil
}

// Copied from containerd/containerd project, copyright The containerd Authors.
// https://github.com/containerd/containerd/blob/4902059cb554f4f06a8d06a12134c17117809f4e/images/converter/default.go#L401
func writeJSON(ctx context.Context, cs content.Store, x interface{}, oldDesc ocispec.Descriptor, labels map[string]string) (*ocispec.Descriptor, error) {
	b, err := json.Marshal(x)
	if err != nil {
		return nil, err
	}
	dgst := digest.SHA256.FromBytes(b)
	ref := fmt.Sprintf("converter-write-json-%s", dgst.String())
	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
	if err != nil {
		return nil, err
	}
	if err := content.Copy(ctx, w, bytes.NewReader(b), int64(len(b)), dgst, content.WithLabels(labels)); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	newDesc := oldDesc
	newDesc.Size = int64(len(b))
	newDesc.Digest = dgst
	return &newDesc, nil
}
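A small in-package sketch of how packToTar might be driven (both file paths are hypothetical). The returned pipe reader streams a tar holding the single source file under the given entry name, and any error from the producing goroutine surfaces on the reader via CloseWithError:

```go
// exampleUsePackToTar shows the pack-then-consume flow; it would live in
// the same package since packToTar is unexported.
func exampleUsePackToTar() error {
	rc, err := packToTar("/tmp/bootstrap", "image/image.boot", false)
	if err != nil {
		return err
	}
	defer rc.Close()
	out, err := os.Create("/tmp/bootstrap.tar")
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, rc)
	return err
}
```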
vendor/github.com/containerd/nydus-snapshotter/pkg/errdefs/errors.go (53 lines, generated, vendored, new file)
@@ -0,0 +1,53 @@
/*
 * Copyright (c) 2020. Ant Group. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

package errdefs

import (
	stderrors "errors"
	"net"
	"strings"
	"syscall"

	"github.com/pkg/errors"
)

const signalKilled = "signal: killed"

var (
	ErrAlreadyExists = errors.New("already exists")
	ErrNotFound      = errors.New("not found")
)

// IsAlreadyExists returns true if the error is due to already exists
func IsAlreadyExists(err error) bool {
	return errors.Is(err, ErrAlreadyExists)
}

// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
	return errors.Is(err, ErrNotFound)
}

// IsSignalKilled returns true if the error is signal killed
func IsSignalKilled(err error) bool {
	return strings.Contains(err.Error(), signalKilled)
}

// IsConnectionClosed returns true if error is due to connection closed
// this is used when snapshotter closed by sig term
func IsConnectionClosed(err error) bool {
	switch err := err.(type) {
	case *net.OpError:
		return err.Err.Error() == "use of closed network connection"
	default:
		return false
	}
}

func IsErofsMounted(err error) bool {
	return stderrors.Is(err, syscall.EBUSY)
}
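IsSignalKilled is how the builder package above distinguishes a timeout kill from other failures when shelling out. A minimal runnable sketch of the same pattern:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"

	"github.com/containerd/nydus-snapshotter/pkg/errdefs"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// CommandContext sends SIGKILL once the deadline passes, so Run
	// returns an error containing "signal: killed".
	err := exec.CommandContext(ctx, "sleep", "10").Run()
	if err != nil && errdefs.IsSignalKilled(err) {
		fmt.Println("process was killed, most likely by the context timeout")
	}
}
```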
vendor/github.com/containerd/stargz-snapshotter/NOTICE.md (67 lines removed, generated, vendored, deleted file)
@@ -1,67 +0,0 @@
The source code developed under the Stargz Snapshotter Project is licensed under Apache License 2.0.

However, the Stargz Snapshotter project contains modified subcomponents from Container Registry Filesystem Project with separate copyright notices and license terms. Your use of the source code for the subcomponent is subject to the terms and conditions as defined by the source project. Files in these subcomponents contain following file header.

```
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the NOTICE.md file.
```

These source code is governed by a 3-Clause BSD license. The copyright notice, list of conditions and disclaimer are the following.

```
Copyright (c) 2019 Google LLC. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```

The Stargz Snapshotter project also contains modified benchmarking code from HelloBench Project with separate copyright notices and license terms. Your use of the source code for the benchmarking code is subject to the terms and conditions as defined by the source project. These source code is governed by a MIT license. The copyright notice, condition and disclaimer are the following. The file in the benchmarking code contains it as the file header.

```
The MIT License (MIT)

Copyright (c) 2015 Tintri

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
vendor/github.com/containerd/stargz-snapshotter/estargz/build.go (92 changes, generated, vendored)
@@ -26,10 +26,10 @@ import (
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
|
@ -48,6 +48,8 @@ type options struct {
|
|||
prioritizedFiles []string
|
||||
missedPrioritizedFiles *[]string
|
||||
compression Compression
|
||||
ctx context.Context
|
||||
minChunkSize int
|
||||
}
|
||||
|
||||
type Option func(o *options) error
|
||||
|
@ -62,6 +64,7 @@ func WithChunkSize(chunkSize int) Option {
|
|||
|
||||
// WithCompressionLevel option specifies the gzip compression level.
|
||||
// The default is gzip.BestCompression.
|
||||
// This option will be ignored if WithCompression option is used.
|
||||
// See also: https://godoc.org/compress/gzip#pkg-constants
|
||||
func WithCompressionLevel(level int) Option {
|
||||
return func(o *options) error {
|
||||
|
@ -104,6 +107,26 @@ func WithCompression(compression Compression) Option {
|
|||
}
|
||||
}
|
||||
|
||||
// WithContext specifies a context that can be used for clean canceleration.
|
||||
func WithContext(ctx context.Context) Option {
|
||||
return func(o *options) error {
|
||||
o.ctx = ctx
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMinChunkSize option specifies the minimal number of bytes of data
|
||||
// must be written in one gzip stream.
|
||||
// By increasing this number, one gzip stream can contain multiple files
|
||||
// and it hopefully leads to smaller result blob.
|
||||
// NOTE: This adds a TOC property that old reader doesn't understand.
|
||||
func WithMinChunkSize(minChunkSize int) Option {
|
||||
return func(o *options) error {
|
||||
o.minChunkSize = minChunkSize
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Blob is an eStargz blob.
|
||||
type Blob struct {
|
||||
io.ReadCloser
|
||||
|
@ -139,12 +162,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
|
|||
opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
|
||||
}
|
||||
layerFiles := newTempFiles()
|
||||
ctx := opts.ctx
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
go func() {
|
||||
select {
|
||||
case <-done:
|
||||
// nop
|
||||
case <-ctx.Done():
|
||||
layerFiles.CleanupAll()
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
if rErr != nil {
|
||||
if err := layerFiles.CleanupAll(); err != nil {
|
||||
rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
|
||||
}
|
||||
}
|
||||
if cErr := ctx.Err(); cErr != nil {
|
||||
rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
|
||||
}
|
||||
}()
|
||||
tarBlob, err := decompressBlob(tarBlob, layerFiles)
|
||||
if err != nil {
|
||||
|
@ -154,7 +194,14 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
|
||||
var tarParts [][]*entry
|
||||
if opts.minChunkSize > 0 {
|
||||
// Each entry needs to know the size of the current gzip stream so they
|
||||
// cannot be processed in parallel.
|
||||
tarParts = [][]*entry{entries}
|
||||
} else {
|
||||
tarParts = divideEntries(entries, runtime.GOMAXPROCS(0))
|
||||
}
|
||||
writers := make([]*Writer, len(tarParts))
|
||||
payloads := make([]*os.File, len(tarParts))
|
||||
var mu sync.Mutex
|
||||
|
@ -169,6 +216,13 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
|
|||
}
|
||||
sw := NewWriterWithCompressor(esgzFile, opts.compression)
|
||||
sw.ChunkSize = opts.chunkSize
|
||||
sw.MinChunkSize = opts.minChunkSize
|
||||
if sw.needsOpenGzEntries == nil {
|
||||
sw.needsOpenGzEntries = make(map[string]struct{})
|
||||
}
|
||||
for _, f := range []string{PrefetchLandmark, NoPrefetchLandmark} {
|
||||
sw.needsOpenGzEntries[f] = struct{}{}
|
||||
}
|
||||
if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -183,7 +237,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
|
|||
rErr = err
|
||||
return nil, err
|
||||
}
|
||||
tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
|
||||
tocAndFooter, tocDgst, err := closeWithCombine(writers...)
|
||||
if err != nil {
|
||||
rErr = err
|
||||
return nil, err
|
||||
|
@ -226,7 +280,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
|
|||
// Writers doesn't write TOC and footer to the underlying writers so they can be
|
||||
// combined into a single eStargz and tocAndFooter returned by this function can
|
||||
// be appended at the tail of that combined blob.
|
||||
func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
|
||||
func closeWithCombine(ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
|
||||
if len(ws) == 0 {
|
||||
return nil, "", fmt.Errorf("at least one writer must be passed")
|
||||
}
|
||||
|
@ -369,7 +423,7 @@ func readerFromEntries(entries ...*entry) io.Reader {
|
|||
|
||||
func importTar(in io.ReaderAt) (*tarFile, error) {
|
||||
tf := &tarFile{}
|
||||
pw, err := newCountReader(in)
|
||||
pw, err := newCountReadSeeker(in)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make position watcher: %w", err)
|
||||
}
|
||||
|
@ -506,12 +560,13 @@ func newTempFiles() *tempFiles {
|
|||
}
|
||||
|
||||
type tempFiles struct {
|
||||
files []*os.File
|
||||
filesMu sync.Mutex
|
||||
files []*os.File
|
||||
filesMu sync.Mutex
|
||||
cleanupOnce sync.Once
|
||||
}
|
||||
|
||||
func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
|
||||
f, err := ioutil.TempFile(dir, pattern)
|
||||
f, err := os.CreateTemp(dir, pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -521,7 +576,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
|
|||
return f, nil
|
||||
}
|
||||
|
||||
func (tf *tempFiles) CleanupAll() error {
|
||||
func (tf *tempFiles) CleanupAll() (err error) {
|
||||
tf.cleanupOnce.Do(func() {
|
||||
err = tf.cleanupAll()
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (tf *tempFiles) cleanupAll() error {
|
||||
tf.filesMu.Lock()
|
||||
defer tf.filesMu.Unlock()
|
||||
var allErr []error
|
||||
|
@ -537,19 +599,19 @@ func (tf *tempFiles) CleanupAll() error {
|
|||
return errorutil.Aggregate(allErr)
|
||||
}
|
||||
|
||||
func newCountReader(r io.ReaderAt) (*countReader, error) {
|
||||
func newCountReadSeeker(r io.ReaderAt) (*countReadSeeker, error) {
|
||||
pos := int64(0)
|
||||
return &countReader{r: r, cPos: &pos}, nil
|
||||
return &countReadSeeker{r: r, cPos: &pos}, nil
|
||||
}
|
||||
|
||||
type countReader struct {
|
||||
type countReadSeeker struct {
|
||||
r io.ReaderAt
|
||||
cPos *int64
|
||||
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (cr *countReader) Read(p []byte) (int, error) {
|
||||
func (cr *countReadSeeker) Read(p []byte) (int, error) {
|
||||
cr.mu.Lock()
|
||||
defer cr.mu.Unlock()
|
||||
|
||||
|
@ -560,7 +622,7 @@ func (cr *countReader) Read(p []byte) (int, error) {
|
|||
return n, err
|
||||
}
|
||||
|
||||
func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
|
||||
func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
||||
cr.mu.Lock()
|
||||
defer cr.mu.Unlock()
|
||||
|
||||
|
@ -581,7 +643,7 @@ func (cr *countReader) Seek(offset int64, whence int) (int64, error) {
|
|||
return offset, nil
|
||||
}
|
||||
|
||||
func (cr *countReader) currentPos() int64 {
|
||||
func (cr *countReadSeeker) currentPos() int64 {
|
||||
cr.mu.Lock()
|
||||
defer cr.mu.Unlock()
|
||||
|
||||
|
|
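The new WithMinChunkSize option changes Build's parallelism: with a minimum chunk size set, entries go through a single writer instead of being divided GOMAXPROCS ways, so several small files can share one gzip stream. A minimal sketch of driving Build with it — the tar path and size are hypothetical:

```go
package main

import (
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
	f, err := os.Open("/tmp/layer.tar") // hypothetical input tar
	if err != nil {
		panic(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}
	blob, err := estargz.Build(
		io.NewSectionReader(f, 0, fi.Size()),
		estargz.WithMinChunkSize(64000), // pack small files into one gzip stream
	)
	if err != nil {
		panic(err)
	}
	defer blob.Close()
	out, err := os.Create("/tmp/layer.esgz")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, blob); err != nil {
		panic(err)
	}
}
```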
vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go (227 changes, generated, vendored)
@@ -31,7 +31,6 @@ import (
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
|
@ -151,10 +150,10 @@ func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
|
|||
allErr = append(allErr, err)
|
||||
continue
|
||||
}
|
||||
if tocSize <= 0 {
|
||||
if tocOffset >= 0 && tocSize <= 0 {
|
||||
tocSize = sr.Size() - tocOffset - fSize
|
||||
}
|
||||
if tocSize < int64(len(maybeTocBytes)) {
|
||||
if tocOffset >= 0 && tocSize < int64(len(maybeTocBytes)) {
|
||||
maybeTocBytes = maybeTocBytes[:tocSize]
|
||||
}
|
||||
r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
|
||||
|
@ -208,8 +207,16 @@ func (r *Reader) initFields() error {
|
|||
uname := map[int]string{}
|
||||
gname := map[int]string{}
|
||||
var lastRegEnt *TOCEntry
|
||||
for _, ent := range r.toc.Entries {
|
||||
var chunkTopIndex int
|
||||
for i, ent := range r.toc.Entries {
|
||||
ent.Name = cleanEntryName(ent.Name)
|
||||
switch ent.Type {
|
||||
case "reg", "chunk":
|
||||
if ent.Offset != r.toc.Entries[chunkTopIndex].Offset {
|
||||
chunkTopIndex = i
|
||||
}
|
||||
ent.chunkTopIndex = chunkTopIndex
|
||||
}
|
||||
if ent.Type == "reg" {
|
||||
lastRegEnt = ent
|
||||
}
|
||||
|
@ -295,7 +302,7 @@ func (r *Reader) initFields() error {
|
|||
if e.isDataType() {
|
||||
e.nextOffset = lastOffset
|
||||
}
|
||||
if e.Offset != 0 {
|
||||
if e.Offset != 0 && e.InnerOffset == 0 {
|
||||
lastOffset = e.Offset
|
||||
}
|
||||
}
|
||||
|
@ -489,6 +496,14 @@ func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
|
|||
//
|
||||
// Name must be absolute path or one that is relative to root.
|
||||
func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
|
||||
fr, err := r.newFileReader(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return io.NewSectionReader(fr, 0, fr.size), nil
|
||||
}
|
||||
|
||||
func (r *Reader) newFileReader(name string) (*fileReader, error) {
|
||||
name = cleanEntryName(name)
|
||||
ent, ok := r.Lookup(name)
|
||||
if !ok {
|
||||
|
@ -506,11 +521,19 @@ func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
|
|||
Err: errors.New("not a regular file"),
|
||||
}
|
||||
}
|
||||
fr := &fileReader{
|
||||
return &fileReader{
|
||||
r: r,
|
||||
size: ent.Size,
|
||||
ents: r.getChunks(ent),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *Reader) OpenFileWithPreReader(name string, preRead func(*TOCEntry, io.Reader) error) (*io.SectionReader, error) {
|
||||
fr, err := r.newFileReader(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fr.preRead = preRead
|
||||
return io.NewSectionReader(fr, 0, fr.size), nil
|
||||
}
|
||||
|
||||
|
@ -522,9 +545,10 @@ func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
|
|||
}
|
||||
|
||||
type fileReader struct {
|
||||
r *Reader
|
||||
size int64
|
||||
ents []*TOCEntry // 1 or more reg/chunk entries
|
||||
r *Reader
|
||||
size int64
|
||||
ents []*TOCEntry // 1 or more reg/chunk entries
|
||||
preRead func(*TOCEntry, io.Reader) error
|
||||
}
|
||||
|
||||
func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
|
@ -579,10 +603,48 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
|
|||
return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
|
||||
}
|
||||
defer dr.Close()
|
||||
if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil {
|
||||
return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
|
||||
|
||||
if fr.preRead == nil {
|
||||
if n, err := io.CopyN(io.Discard, dr, ent.InnerOffset+off); n != ent.InnerOffset+off || err != nil {
|
||||
return 0, fmt.Errorf("discard of %d bytes != %v, %v", ent.InnerOffset+off, n, err)
|
||||
}
|
||||
return io.ReadFull(dr, p)
|
||||
}
|
||||
return io.ReadFull(dr, p)
|
||||
|
||||
var retN int
|
||||
var retErr error
|
||||
var found bool
|
||||
var nr int64
|
||||
for _, e := range fr.r.toc.Entries[ent.chunkTopIndex:] {
|
||||
if !e.isDataType() {
|
||||
continue
|
||||
}
|
||||
if e.Offset != fr.r.toc.Entries[ent.chunkTopIndex].Offset {
|
||||
break
|
||||
}
|
||||
if in, err := io.CopyN(io.Discard, dr, e.InnerOffset-nr); err != nil || in != e.InnerOffset-nr {
|
||||
return 0, fmt.Errorf("discard of remaining %d bytes != %v, %v", e.InnerOffset-nr, in, err)
|
||||
}
|
||||
nr = e.InnerOffset
|
||||
if e == ent {
|
||||
found = true
|
||||
if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
|
||||
return 0, fmt.Errorf("discard of offset %d bytes != %v, %v", off, n, err)
|
||||
}
|
||||
retN, retErr = io.ReadFull(dr, p)
|
||||
nr += off + int64(retN)
|
||||
continue
|
||||
}
|
||||
cr := &countReader{r: io.LimitReader(dr, e.ChunkSize)}
|
||||
if err := fr.preRead(e, cr); err != nil {
|
||||
return 0, fmt.Errorf("failed to pre read: %w", err)
|
||||
}
|
||||
nr += cr.n
|
||||
}
|
||||
if !found {
|
||||
return 0, fmt.Errorf("fileReader.ReadAt: target entry not found")
|
||||
}
|
||||
return retN, retErr
|
||||
}
|
||||
|
||||
// A Writer writes stargz files.
|
||||
|
@ -600,11 +662,20 @@ type Writer struct {
|
|||
lastGroupname map[int]string
|
||||
compressor Compressor
|
||||
|
||||
uncompressedCounter *countWriteFlusher
|
||||
|
||||
// ChunkSize optionally controls the maximum number of bytes
|
||||
// of data of a regular file that can be written in one gzip
|
||||
// stream before a new gzip stream is started.
|
||||
// Zero means to use a default, currently 4 MiB.
|
||||
ChunkSize int
|
||||
|
||||
// MinChunkSize optionally controls the minimum number of bytes
|
||||
// of data must be written in one gzip stream before a new gzip
|
||||
// NOTE: This adds a TOC property that stargz snapshotter < v0.13.0 doesn't understand.
|
||||
MinChunkSize int
|
||||
|
||||
needsOpenGzEntries map[string]struct{}
|
||||
}
|
||||
|
||||
// currentCompressionWriter writes to the current w.gz field, which can
|
||||
|
@ -647,6 +718,9 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse footer: %w", err)
|
||||
}
|
||||
if blobPayloadSize < 0 {
|
||||
blobPayloadSize = sr.Size()
|
||||
}
|
||||
return c.Reader(io.LimitReader(sr, blobPayloadSize))
|
||||
}
|
||||
|
||||
|
@ -673,11 +747,12 @@ func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
|
|||
bw := bufio.NewWriter(w)
|
||||
cw := &countWriter{w: bw}
|
||||
return &Writer{
|
||||
bw: bw,
|
||||
cw: cw,
|
||||
toc: &JTOC{Version: 1},
|
||||
diffHash: sha256.New(),
|
||||
compressor: c,
|
||||
bw: bw,
|
||||
cw: cw,
|
||||
toc: &JTOC{Version: 1},
|
||||
diffHash: sha256.New(),
|
||||
compressor: c,
|
||||
uncompressedCounter: &countWriteFlusher{},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -718,6 +793,20 @@ func (w *Writer) closeGz() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (w *Writer) flushGz() error {
|
||||
if w.closed {
|
||||
return errors.New("flush on closed Writer")
|
||||
}
|
||||
if w.gz != nil {
|
||||
if f, ok := w.gz.(interface {
|
||||
Flush() error
|
||||
}); ok {
|
||||
return f.Flush()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nameIfChanged returns name, unless it was the already the value of (*mp)[id],
|
||||
// in which case it returns the empty string.
|
||||
func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
|
||||
|
@ -737,6 +826,9 @@ func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
|
|||
func (w *Writer) condOpenGz() (err error) {
|
||||
if w.gz == nil {
|
||||
w.gz, err = w.compressor.Writer(w.cw)
|
||||
if w.gz != nil {
|
||||
w.gz = w.uncompressedCounter.register(w.gz)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -785,6 +877,8 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
|
|||
if lossless {
|
||||
tr.RawAccounting = true
|
||||
}
|
||||
prevOffset := w.cw.n
|
||||
var prevOffsetUncompressed int64
|
||||
for {
|
||||
h, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
|
@ -884,10 +978,6 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
|
|||
totalSize := ent.Size // save it before we destroy ent
|
||||
tee := io.TeeReader(tr, payloadDigest.Hash())
|
||||
for written < totalSize {
|
||||
if err := w.closeGz(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
chunkSize := int64(w.chunkSize())
|
||||
remain := totalSize - written
|
||||
if remain < chunkSize {
|
||||
|
@ -895,7 +985,23 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
|
|||
} else {
|
||||
ent.ChunkSize = chunkSize
|
||||
}
|
||||
ent.Offset = w.cw.n
|
||||
|
||||
// We flush the underlying compression writer here to correctly calculate "w.cw.n".
|
||||
if err := w.flushGz(); err != nil {
|
||||
return err
|
||||
}
|
||||
if w.needsOpenGz(ent) || w.cw.n-prevOffset >= int64(w.MinChunkSize) {
|
||||
if err := w.closeGz(); err != nil {
|
||||
return err
|
||||
}
|
||||
ent.Offset = w.cw.n
|
||||
prevOffset = ent.Offset
|
||||
prevOffsetUncompressed = w.uncompressedCounter.n
|
||||
} else {
|
||||
ent.Offset = prevOffset
|
||||
ent.InnerOffset = w.uncompressedCounter.n - prevOffsetUncompressed
|
||||
}
|
||||
|
||||
ent.ChunkOffset = written
|
||||
chunkDigest := digest.Canonical.Digester()
|
||||
|
||||
|
@ -933,7 +1039,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
|
|||
}
|
||||
}
|
||||
}
|
||||
remainDest := ioutil.Discard
|
||||
remainDest := io.Discard
|
||||
if lossless {
|
||||
remainDest = dst // Preserve the remaining bytes in lossless mode
|
||||
}
|
||||
|
@ -941,6 +1047,17 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (w *Writer) needsOpenGz(ent *TOCEntry) bool {
|
||||
if ent.Type != "reg" {
|
||||
return false
|
||||
}
|
||||
if w.needsOpenGzEntries == nil {
|
||||
return false
|
||||
}
|
||||
_, ok := w.needsOpenGzEntries[ent.Name]
|
||||
return ok
|
||||
}
|
||||
|
||||
// DiffID returns the SHA-256 of the uncompressed tar bytes.
|
||||
// It is only valid to call DiffID after Close.
|
||||
func (w *Writer) DiffID() string {
|
||||
|
@ -957,6 +1074,28 @@ func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) {
|
|||
}
|
||||
|
||||
func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) {
|
||||
if tocOff < 0 {
|
||||
// This means that TOC isn't contained in the blob.
|
||||
// We pass nil reader to ParseTOC and expect that ParseTOC acquire TOC from
|
||||
// the external location.
|
||||
start := time.Now()
|
||||
toc, tocDgst, err := d.ParseTOC(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil {
|
||||
opts.telemetry.GetTocLatency(start)
|
||||
}
|
||||
if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil {
|
||||
opts.telemetry.DeserializeTocLatency(start)
|
||||
}
|
||||
return &Reader{
|
||||
sr: sr,
|
||||
toc: toc,
|
||||
tocDigest: tocDgst,
|
||||
decompressor: d,
|
||||
}, nil
|
||||
}
|
||||
if len(tocBytes) > 0 {
|
||||
start := time.Now()
|
||||
toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes))
|
||||
|
@ -1022,6 +1161,37 @@ func (cw *countWriter) Write(p []byte) (n int, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
type countWriteFlusher struct {
|
||||
io.WriteCloser
|
||||
n int64
|
||||
}
|
||||
|
||||
func (wc *countWriteFlusher) register(w io.WriteCloser) io.WriteCloser {
|
||||
wc.WriteCloser = w
|
||||
return wc
|
||||
}
|
||||
|
||||
func (wc *countWriteFlusher) Write(p []byte) (n int, err error) {
|
||||
n, err = wc.WriteCloser.Write(p)
|
||||
wc.n += int64(n)
|
||||
return
|
||||
}
|
||||
|
||||
func (wc *countWriteFlusher) Flush() error {
|
||||
if f, ok := wc.WriteCloser.(interface {
|
||||
Flush() error
|
||||
}); ok {
|
||||
return f.Flush()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (wc *countWriteFlusher) Close() error {
|
||||
err := wc.WriteCloser.Close()
|
||||
wc.WriteCloser = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// isGzip reports whether br is positioned right before an upcoming gzip stream.
|
||||
// It does not consume any bytes from br.
|
||||
func isGzip(br *bufio.Reader) bool {
|
||||
|
@ -1040,3 +1210,14 @@ func positive(n int64) int64 {
|
|||
}
|
||||
return n
|
||||
}
|
||||
|
||||
type countReader struct {
|
||||
r io.Reader
|
||||
n int64
|
||||
}
|
||||
|
||||
func (cr *countReader) Read(p []byte) (n int, err error) {
|
||||
n, err = cr.r.Read(p)
|
||||
cr.n += int64(n)
|
||||
return
|
||||
}
|
||||
|
|
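The new OpenFileWithPreReader pairs with InnerOffset: when several files share one gzip stream, decompressing toward the requested file necessarily passes over its siblings, and the callback lets a caller keep those chunks instead of re-decompressing later. A sketch under the assumption that `r` came from estargz.Open over a blob built with MinChunkSize > 0; the file name is hypothetical:

```go
package main

import (
	"fmt"
	"io"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func readWithPreRead(r *estargz.Reader) error {
	sr, err := r.OpenFileWithPreReader("usr/bin/app", func(e *estargz.TOCEntry, chunk io.Reader) error {
		// A real caller would cache this sibling chunk; we just drain it.
		fmt.Printf("pre-read of %s\n", e.Name)
		_, err := io.Copy(io.Discard, chunk)
		return err
	})
	if err != nil {
		return err
	}
	buf := make([]byte, 4096)
	if _, err := sr.ReadAt(buf, 0); err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return err
	}
	return nil
}
```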
vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go (2 changes, generated, vendored)
@@ -60,7 +60,7 @@ type GzipCompressor struct {
 	compressionLevel int
 }
 
-func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
+func (gc *GzipCompressor) Writer(w io.Writer) (WriteFlushCloser, error) {
 	return gzip.NewWriterLevel(w, gc.compressionLevel)
 }
 
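Writer now returns WriteFlushCloser so estargz can flush mid-stream when computing chunk offsets; gzip.Writer already has a Flush method, so this compressor is unchanged apart from the signature. A custom Compressor wrapping a plain io.WriteCloser would need an adapter — a hypothetical sketch, not part of the vendored code:

```go
// nopFlusher upgrades a buffering-free io.WriteCloser to the
// WriteFlushCloser contract with a no-op Flush.
type nopFlusher struct{ io.WriteCloser }

func (nopFlusher) Flush() error { return nil } // nothing buffered, nothing to flush
```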
|
|
699
vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
generated
vendored
699
vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
generated
vendored
|
@ -31,8 +31,9 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
@ -44,21 +45,27 @@ import (
|
|||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
// TestingController is Compression with some helper methods necessary for testing.
|
||||
type TestingController interface {
|
||||
Compression
|
||||
CountStreams(*testing.T, []byte) int
|
||||
TestStreams(t *testing.T, b []byte, streams []int64)
|
||||
DiffIDOf(*testing.T, []byte) string
|
||||
String() string
|
||||
}
|
||||
|
||||
// CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them.
|
||||
func CompressionTestSuite(t *testing.T, controllers ...TestingController) {
|
||||
func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
|
||||
t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
|
||||
t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
|
||||
t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
|
||||
}
|
||||
|
||||
type TestingControllerFactory func() TestingController
|
||||
|
||||
const (
|
||||
uncompressedType int = iota
|
||||
gzipType
|
||||
|
@ -75,11 +82,12 @@ var allowedPrefix = [4]string{"", "./", "/", "../"}
|
|||
|
||||
// testBuild tests the resulting stargz blob built by this pkg has the same
|
||||
// contents as the normal stargz blob.
|
||||
func testBuild(t *testing.T, controllers ...TestingController) {
|
||||
func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
|
||||
tests := []struct {
|
||||
name string
|
||||
chunkSize int
|
||||
in []tarEntry
|
||||
name string
|
||||
chunkSize int
|
||||
minChunkSize []int
|
||||
in []tarEntry
|
||||
}{
|
||||
{
|
||||
name: "regfiles and directories",
|
||||
|
@ -108,11 +116,14 @@ func testBuild(t *testing.T, controllers ...TestingController) {
|
|||
),
|
||||
},
|
||||
{
|
||||
name: "various files",
|
||||
chunkSize: 4,
|
||||
name: "various files",
|
||||
chunkSize: 4,
|
||||
minChunkSize: []int{0, 64000},
|
||||
in: tarOf(
|
||||
file("baz.txt", "bazbazbazbazbazbazbaz"),
|
||||
file("foo.txt", "a"),
|
||||
file("foo1.txt", "a"),
|
||||
file("bar/foo2.txt", "b"),
|
||||
file("foo3.txt", "c"),
|
||||
symlink("barlink", "test/bar.txt"),
|
||||
dir("test/"),
|
||||
dir("dev/"),
|
||||
|
@ -144,99 +155,112 @@ func testBuild(t *testing.T, controllers ...TestingController) {
|
|||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
if len(tt.minChunkSize) == 0 {
|
||||
tt.minChunkSize = []int{0}
|
||||
}
|
||||
for _, srcCompression := range srcCompressions {
|
||||
srcCompression := srcCompression
|
||||
for _, cl := range controllers {
|
||||
cl := cl
|
||||
for _, newCL := range controllers {
|
||||
newCL := newCL
|
||||
for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
|
||||
srcTarFormat := srcTarFormat
|
||||
for _, prefix := range allowedPrefix {
|
||||
prefix := prefix
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) {
|
||||
tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
|
||||
// Test divideEntries()
|
||||
entries, err := sortEntries(tarBlob, nil, nil) // identical order
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse tar: %v", err)
|
||||
}
|
||||
var merged []*entry
|
||||
for _, part := range divideEntries(entries, 4) {
|
||||
merged = append(merged, part...)
|
||||
}
|
||||
if !reflect.DeepEqual(entries, merged) {
|
||||
for _, e := range entries {
|
||||
t.Logf("Original: %v", e.header)
|
||||
for _, minChunkSize := range tt.minChunkSize {
|
||||
minChunkSize := minChunkSize
|
||||
t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
|
||||
tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
|
||||
// Test divideEntries()
|
||||
entries, err := sortEntries(tarBlob, nil, nil) // identical order
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse tar: %v", err)
|
||||
}
|
||||
for _, e := range merged {
|
||||
t.Logf("Merged: %v", e.header)
|
||||
var merged []*entry
|
||||
for _, part := range divideEntries(entries, 4) {
|
||||
merged = append(merged, part...)
|
||||
}
|
||||
if !reflect.DeepEqual(entries, merged) {
|
||||
for _, e := range entries {
|
||||
t.Logf("Original: %v", e.header)
|
||||
}
|
||||
for _, e := range merged {
|
||||
t.Logf("Merged: %v", e.header)
|
||||
}
|
||||
t.Errorf("divided entries couldn't be merged")
|
||||
return
|
||||
}
|
||||
t.Errorf("divided entries couldn't be merged")
|
||||
return
|
||||
}
|
||||
|
||||
// Prepare sample data
|
||||
wantBuf := new(bytes.Buffer)
|
||||
sw := NewWriterWithCompressor(wantBuf, cl)
|
||||
sw.ChunkSize = tt.chunkSize
|
||||
if err := sw.AppendTar(tarBlob); err != nil {
|
||||
t.Fatalf("failed to append tar to want stargz: %v", err)
|
||||
}
|
||||
if _, err := sw.Close(); err != nil {
|
||||
t.Fatalf("failed to prepare want stargz: %v", err)
|
||||
}
|
||||
wantData := wantBuf.Bytes()
|
||||
want, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(wantData), 0, int64(len(wantData))),
|
||||
WithDecompressors(cl),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the want stargz: %v", err)
|
||||
}
|
||||
// Prepare sample data
|
||||
cl1 := newCL()
|
||||
wantBuf := new(bytes.Buffer)
|
||||
sw := NewWriterWithCompressor(wantBuf, cl1)
|
||||
sw.MinChunkSize = minChunkSize
|
||||
sw.ChunkSize = tt.chunkSize
|
||||
if err := sw.AppendTar(tarBlob); err != nil {
|
||||
t.Fatalf("failed to append tar to want stargz: %v", err)
|
||||
}
|
||||
if _, err := sw.Close(); err != nil {
|
||||
t.Fatalf("failed to prepare want stargz: %v", err)
|
||||
}
|
||||
wantData := wantBuf.Bytes()
|
||||
want, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(wantData), 0, int64(len(wantData))),
|
||||
WithDecompressors(cl1),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the want stargz: %v", err)
|
||||
}
|
||||
|
||||
// Prepare testing data
|
||||
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
|
||||
WithChunkSize(tt.chunkSize), WithCompression(cl))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to build stargz: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
gotBuf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(gotBuf, rc); err != nil {
|
||||
t.Fatalf("failed to copy built stargz blob: %v", err)
|
||||
}
|
||||
gotData := gotBuf.Bytes()
|
||||
got, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
|
||||
WithDecompressors(cl),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the got stargz: %v", err)
|
||||
}
|
||||
// Prepare testing data
|
||||
var opts []Option
|
||||
if minChunkSize > 0 {
|
||||
opts = append(opts, WithMinChunkSize(minChunkSize))
|
||||
}
|
||||
cl2 := newCL()
|
||||
rc, err := Build(compressBlob(t, tarBlob, srcCompression),
|
||||
append(opts, WithChunkSize(tt.chunkSize), WithCompression(cl2))...)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to build stargz: %v", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
gotBuf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(gotBuf, rc); err != nil {
|
||||
t.Fatalf("failed to copy built stargz blob: %v", err)
|
||||
}
|
||||
gotData := gotBuf.Bytes()
|
||||
got, err := Open(io.NewSectionReader(
|
||||
bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
|
||||
WithDecompressors(cl2),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse the got stargz: %v", err)
|
||||
}
|
||||
|
||||
// Check DiffID is properly calculated
|
||||
rc.Close()
|
||||
diffID := rc.DiffID()
|
||||
wantDiffID := cl.DiffIDOf(t, gotData)
|
||||
if diffID.String() != wantDiffID {
|
||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||
}
|
||||
// Check DiffID is properly calculated
|
||||
rc.Close()
|
||||
diffID := rc.DiffID()
|
||||
wantDiffID := cl2.DiffIDOf(t, gotData)
|
||||
if diffID.String() != wantDiffID {
|
||||
t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
|
||||
}
|
||||
|
||||
// Compare as stargz
|
||||
if !isSameVersion(t, cl, wantData, gotData) {
|
||||
t.Errorf("built stargz hasn't same json")
|
||||
return
|
||||
}
|
||||
if !isSameEntries(t, want, got) {
|
||||
t.Errorf("built stargz isn't same as the original")
|
||||
return
|
||||
}
|
||||
// Compare as stargz
|
||||
if !isSameVersion(t, cl1, wantData, cl2, gotData) {
|
||||
t.Errorf("built stargz hasn't same json")
|
||||
return
|
||||
}
|
||||
if !isSameEntries(t, want, got) {
|
||||
t.Errorf("built stargz isn't same as the original")
|
||||
return
|
||||
}
|
||||
|
||||
// Compare as tar.gz
|
||||
if !isSameTarGz(t, cl, wantData, gotData) {
|
||||
t.Errorf("built stargz isn't same tar.gz")
|
||||
return
|
||||
}
|
||||
})
|
||||
// Compare as tar.gz
|
||||
if !isSameTarGz(t, cl1, wantData, cl2, gotData) {
|
||||
t.Errorf("built stargz isn't same tar.gz")
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -244,13 +268,13 @@ func testBuild(t *testing.T, controllers ...TestingController) {
|
|||
}
|
||||
}
|
||||
|
||||
func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
|
||||
aGz, err := controller.Reader(bytes.NewReader(a))
|
||||
func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
|
||||
aGz, err := cla.Reader(bytes.NewReader(a))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read A")
|
||||
}
|
||||
defer aGz.Close()
|
||||
bGz, err := controller.Reader(bytes.NewReader(b))
|
||||
bGz, err := clb.Reader(bytes.NewReader(b))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to read B")
|
||||
}
|
||||
|
@ -287,11 +311,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
|
|||
return false
|
||||
|
||||
}
|
||||
aFile, err := ioutil.ReadAll(aTar)
|
||||
aFile, err := io.ReadAll(aTar)
|
||||
if err != nil {
|
||||
t.Fatal("failed to read tar payload of A")
|
||||
}
|
||||
bFile, err := ioutil.ReadAll(bTar)
|
||||
bFile, err := io.ReadAll(bTar)
|
||||
if err != nil {
|
||||
t.Fatal("failed to read tar payload of B")
|
||||
}
|
||||
|
@ -304,12 +328,12 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool {
|
||||
aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller)
|
||||
func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
|
||||
aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse A: %v", err)
|
||||
}
|
||||
bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller)
|
||||
bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), clb)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to parse B: %v", err)
|
||||
}
|
||||
|
@ -463,7 +487,7 @@ func equalEntry(a, b *TOCEntry) bool {
|
|||
a.GID == b.GID &&
|
||||
a.Uname == b.Uname &&
|
||||
a.Gname == b.Gname &&
|
||||
(a.Offset > 0) == (b.Offset > 0) &&
|
||||
(a.Offset >= 0) == (b.Offset >= 0) &&
|
||||
(a.NextOffset() > 0) == (b.NextOffset() > 0) &&
|
||||
a.DevMajor == b.DevMajor &&
|
||||
a.DevMinor == b.DevMinor &&
|
||||
|
@ -510,14 +534,15 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
|
|||
const chunkSize = 3
|
||||
|
||||
// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int)
|
||||
type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController)
|
||||
type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
|
||||
|
||||
// testDigestAndVerify runs specified checks against sample stargz blobs.
|
||||
func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
||||
func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
|
||||
tests := []struct {
|
||||
name string
|
||||
tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
|
||||
checks []check
|
||||
name string
|
||||
tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
|
||||
checks []check
|
||||
minChunkSize []int
|
||||
}{
|
||||
{
|
||||
name: "no-regfile",
|
||||
|
@ -544,6 +569,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
|||
regDigest(t, "test/bar.txt", "bbb", dgstMap),
|
||||
)
|
||||
},
|
||||
minChunkSize: []int{0, 64000},
|
||||
checks: []check{
|
||||
checkStargzTOC,
|
||||
checkVerifyTOC,
|
||||
|
@ -581,11 +607,14 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
|||
},
|
||||
},
|
||||
{
|
||||
name: "with-non-regfiles",
|
||||
name: "with-non-regfiles",
|
||||
minChunkSize: []int{0, 64000},
|
||||
tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
|
||||
return tarOf(
|
||||
regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
|
||||
regDigest(t, "foo.txt", "a", dgstMap),
|
||||
regDigest(t, "bar/foo2.txt", "b", dgstMap),
|
||||
regDigest(t, "foo3.txt", "c", dgstMap),
|
||||
symlink("barlink", "test/bar.txt"),
|
||||
dir("test/"),
|
||||
regDigest(t, "test/bar.txt", "testbartestbar", dgstMap),
|
||||
|
@ -599,6 +628,8 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
|
|||
checkVerifyInvalidStargzFail(buildTar(t, tarOf(
|
||||
file("baz.txt", "bazbazbazbazbazbazbaz"),
|
||||
file("foo.txt", "a"),
|
||||
file("bar/foo2.txt", "b"),
|
||||
file("foo3.txt", "c"),
|
||||
symlink("barlink", "test/bar.txt"),
|
||||
dir("test/"),
|
||||
file("test/bar.txt", "testbartestbar"),
|
||||
|
@@ -612,38 +643,45 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
  }

  for _, tt := range tests {
+   if len(tt.minChunkSize) == 0 {
+     tt.minChunkSize = []int{0}
+   }
    for _, srcCompression := range srcCompressions {
      srcCompression := srcCompression
-     for _, cl := range controllers {
-       cl := cl
+     for _, newCL := range controllers {
+       newCL := newCL
        for _, prefix := range allowedPrefix {
          prefix := prefix
          for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
            srcTarFormat := srcTarFormat
-           t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) {
-             // Get original tar file and chunk digests
-             dgstMap := make(map[string]digest.Digest)
-             tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
+           for _, minChunkSize := range tt.minChunkSize {
+             minChunkSize := minChunkSize
+             t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
+               // Get original tar file and chunk digests
+               dgstMap := make(map[string]digest.Digest)
+               tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)

-             rc, err := Build(compressBlob(t, tarBlob, srcCompression),
-               WithChunkSize(chunkSize), WithCompression(cl))
-             if err != nil {
-               t.Fatalf("failed to convert stargz: %v", err)
-             }
-             tocDigest := rc.TOCDigest()
-             defer rc.Close()
-             buf := new(bytes.Buffer)
-             if _, err := io.Copy(buf, rc); err != nil {
-               t.Fatalf("failed to copy built stargz blob: %v", err)
-             }
-             newStargz := buf.Bytes()
-             // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
-             dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})
+               cl := newCL()
+               rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+                 WithChunkSize(chunkSize), WithCompression(cl))
+               if err != nil {
+                 t.Fatalf("failed to convert stargz: %v", err)
+               }
+               tocDigest := rc.TOCDigest()
+               defer rc.Close()
+               buf := new(bytes.Buffer)
+               if _, err := io.Copy(buf, rc); err != nil {
+                 t.Fatalf("failed to copy built stargz blob: %v", err)
+               }
+               newStargz := buf.Bytes()
+               // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour.
+               dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents})

-             for _, check := range tt.checks {
-               check(t, newStargz, tocDigest, dgstMap, cl)
-             }
-           })
+               for _, check := range tt.checks {
+                 check(t, newStargz, tocDigest, dgstMap, cl, newCL)
+               }
+             })
+           }
          }
        }
      }
    }
  }
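A note on the `cl := cl` / `newCL := newCL` pattern above: before Go 1.22, a `for` loop reused a single variable per range clause, so a closure that outlives an iteration — such as a `t.Run` subtest body — would otherwise observe only the final value. A minimal stand-alone sketch of the pitfall and the rebinding fix (the names here are illustrative, not from this diff):

package main

import "fmt"

func main() {
  var runs []func()
  for _, name := range []string{"gzip", "zstd"} {
    name := name // rebind so each closure captures its own copy (implicit since Go 1.22)
    runs = append(runs, func() { fmt.Println(name) })
  }
  for _, r := range runs {
    r() // prints "gzip" then "zstd"; without the rebind, pre-1.22 Go prints "zstd" twice
  }
}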
@@ -654,7 +692,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingController) {
 // checkStargzTOC checks the TOC JSON of the passed stargz has the expected
 // digest and contains valid chunks. It walks all entries in the stargz and
 // checks all chunk digests stored to the TOC JSON match the actual contents.
-func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
  sgz, err := Open(
    io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
    WithDecompressors(controller),
@@ -765,7 +803,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
 // checkVerifyTOC checks the verification works for the TOC JSON of the passed
 // stargz. It walks all entries in the stargz and checks the verifications for
 // all chunks work.
-func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
  sgz, err := Open(
    io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
    WithDecompressors(controller),
@@ -846,7 +884,7 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
 // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be
 // detected during the verification and the verification returns an error.
 func checkVerifyInvalidTOCEntryFail(filename string) check {
-  return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+  return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
    funcs := map[string]rewriteFunc{
      "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
        var found bool
@@ -920,8 +958,9 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
 // checkVerifyInvalidStargzFail checks if the verification detects that the
 // given stargz file doesn't match to the expected digest and returns error.
 func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
-  return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
-   rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller))
+  return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+   cl := newController()
+   rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
    if err != nil {
      t.Fatalf("failed to convert stargz: %v", err)
    }
@@ -934,7 +973,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {

    sgz, err := Open(
      io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
-     WithDecompressors(controller),
+     WithDecompressors(cl),
    )
    if err != nil {
      t.Fatalf("failed to parse converted stargz: %v", err)
@@ -951,7 +990,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
 // checkVerifyBrokenContentFail checks if the verifier detects broken contents
 // that doesn't match to the expected digest and returns error.
 func checkVerifyBrokenContentFail(filename string) check {
-  return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+  return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
    // Parse stargz file
    sgz, err := Open(
      io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
@@ -1070,7 +1109,10 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, tocOffset int64, err error) {
  }

  // Decode the TOC JSON
- tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
+ var tocReader io.Reader
+ if tocOffset >= 0 {
+   tocReader = io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
+ }
  decodedJTOC, _, err = controller.ParseTOC(tocReader)
  if err != nil {
    return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
@@ -1078,28 +1120,31 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, tocOffset int64, err error) {
  return decodedJTOC, tocOffset, nil
 }

-func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
+func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
  const content = "Some contents"
  invalidUtf8 := "\xff\xfe\xfd"

  xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8}
  sampleOwner := owner{uid: 50, gid: 100}

+ data64KB := randomContents(64000)
+
  tests := []struct {
-   name      string
-   chunkSize int
-   in        []tarEntry
-   want      []stargzCheck
-   wantNumGz int // expected number of streams
+   name         string
+   chunkSize    int
+   minChunkSize int
+   in           []tarEntry
+   want         []stargzCheck
+   wantNumGz    int // expected number of streams

    wantNumGzLossLess  int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
    wantFailOnLossLess bool
+   wantTOCVersion     int // default = 1
  }{
    {
-     name:              "empty",
-     in:                tarOf(),
-     wantNumGz:         2, // empty tar + TOC + footer
-     wantNumGzLossLess: 3, // empty tar + TOC + footer
+     name:      "empty",
+     in:        tarOf(),
+     wantNumGz: 2, // (empty tar) + TOC + footer
      want: checks(
        numTOCEntries(0),
      ),
@@ -1195,7 +1240,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
        dir("foo/"),
        file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"),
      ),
-     wantNumGz: 9,
+     wantNumGz: 9, // dir + big.txt(6 chunks) + TOC + footer
      want: checks(
        numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file
        hasDir("foo/"),
@@ -1314,23 +1359,120 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
      ),
      wantFailOnLossLess: true,
    },
    {
      name: "hardlink should be replaced to the destination entry",
      in: tarOf(
        dir("foo/"),
        file("foo/foo1", "test"),
        link("foolink", "foo/foo1"),
      ),
      wantNumGz: 4, // dir, foo1 + link, TOC, footer
      want: checks(
        mustSameEntry("foo/foo1", "foolink"),
      ),
    },
+   {
+     name:         "several_files_in_chunk",
+     minChunkSize: 8000,
+     in: tarOf(
+       dir("foo/"),
+       file("foo/foo1", data64KB),
+       file("foo2", "bb"),
+       file("foo22", "ccc"),
+       dir("bar/"),
+       file("bar/bar.txt", "aaa"),
+       file("foo3", data64KB),
+     ),
+     // NOTE: we assume that the compressed "data64KB" is still larger than 8KB
+     wantNumGz: 4, // dir+foo1, foo2+foo22+dir+bar.txt+foo3, TOC, footer
+     want: checks(
+       numTOCEntries(7), // dir, foo1, foo2, foo22, dir, bar.txt, foo3
+       hasDir("foo/"),
+       hasDir("bar/"),
+       hasFileLen("foo/foo1", len(data64KB)),
+       hasFileLen("foo2", len("bb")),
+       hasFileLen("foo22", len("ccc")),
+       hasFileLen("bar/bar.txt", len("aaa")),
+       hasFileLen("foo3", len(data64KB)),
+       hasFileDigest("foo/foo1", digestFor(data64KB)),
+       hasFileDigest("foo2", digestFor("bb")),
+       hasFileDigest("foo22", digestFor("ccc")),
+       hasFileDigest("bar/bar.txt", digestFor("aaa")),
+       hasFileDigest("foo3", digestFor(data64KB)),
+       hasFileContentsWithPreRead("foo22", 0, "ccc", chunkInfo{"foo2", "bb"}, chunkInfo{"bar/bar.txt", "aaa"}, chunkInfo{"foo3", data64KB}),
+       hasFileContentsRange("foo/foo1", 0, data64KB),
+       hasFileContentsRange("foo2", 0, "bb"),
+       hasFileContentsRange("foo2", 1, "b"),
+       hasFileContentsRange("foo22", 0, "ccc"),
+       hasFileContentsRange("foo22", 1, "cc"),
+       hasFileContentsRange("foo22", 2, "c"),
+       hasFileContentsRange("bar/bar.txt", 0, "aaa"),
+       hasFileContentsRange("bar/bar.txt", 1, "aa"),
+       hasFileContentsRange("bar/bar.txt", 2, "a"),
+       hasFileContentsRange("foo3", 0, data64KB),
+       hasFileContentsRange("foo3", 1, data64KB[1:]),
+       hasFileContentsRange("foo3", 2, data64KB[2:]),
+       hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+       hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+     ),
+   },
+   {
+     name:         "several_files_in_chunk_chunked",
+     minChunkSize: 8000,
+     chunkSize:    32000,
+     in: tarOf(
+       dir("foo/"),
+       file("foo/foo1", data64KB),
+       file("foo2", "bb"),
+       dir("bar/"),
+       file("foo3", data64KB),
+     ),
+     // NOTE: we assume that the compressed chunk of "data64KB" is still larger than 8KB
+     wantNumGz: 6, // dir+foo1(1), foo1(2), foo2+dir+foo3(1), foo3(2), TOC, footer
+     want: checks(
+       numTOCEntries(7), // dir, foo1(2 chunks), foo2, dir, foo3(2 chunks)
+       hasDir("foo/"),
+       hasDir("bar/"),
+       hasFileLen("foo/foo1", len(data64KB)),
+       hasFileLen("foo2", len("bb")),
+       hasFileLen("foo3", len(data64KB)),
+       hasFileDigest("foo/foo1", digestFor(data64KB)),
+       hasFileDigest("foo2", digestFor("bb")),
+       hasFileDigest("foo3", digestFor(data64KB)),
+       hasFileContentsWithPreRead("foo2", 0, "bb", chunkInfo{"foo3", data64KB[:32000]}),
+       hasFileContentsRange("foo/foo1", 0, data64KB),
+       hasFileContentsRange("foo/foo1", 1, data64KB[1:]),
+       hasFileContentsRange("foo/foo1", 2, data64KB[2:]),
+       hasFileContentsRange("foo/foo1", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+       hasFileContentsRange("foo/foo1", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+       hasFileContentsRange("foo2", 0, "bb"),
+       hasFileContentsRange("foo2", 1, "b"),
+       hasFileContentsRange("foo3", 0, data64KB),
+       hasFileContentsRange("foo3", 1, data64KB[1:]),
+       hasFileContentsRange("foo3", 2, data64KB[2:]),
+       hasFileContentsRange("foo3", len(data64KB)/2, data64KB[len(data64KB)/2:]),
+       hasFileContentsRange("foo3", len(data64KB)-1, data64KB[len(data64KB)-1:]),
+     ),
+   },
  }

  for _, tt := range tests {
-   for _, cl := range controllers {
-     cl := cl
+   for _, newCL := range controllers {
+     newCL := newCL
      for _, prefix := range allowedPrefix {
        prefix := prefix
        for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
          srcTarFormat := srcTarFormat
          for _, lossless := range []bool{true, false} {
-           t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
+           t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
              var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
              origTarDgstr := digest.Canonical.Digester()
              tr = io.TeeReader(tr, origTarDgstr.Hash())
              var stargzBuf bytes.Buffer
-             w := NewWriterWithCompressor(&stargzBuf, cl)
+             cl1 := newCL()
+             w := NewWriterWithCompressor(&stargzBuf, cl1)
              w.ChunkSize = tt.chunkSize
+             w.MinChunkSize = tt.minChunkSize
              if lossless {
                err := w.AppendTarLossLess(tr)
                if tt.wantFailOnLossLess {
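The two new table cases exercise the writer's MinChunkSize knob: with a floor of 8000 bytes, several small payloads share one compressed stream instead of getting one stream each, which is why wantNumGz drops to 4 and 6 above. A minimal sketch of driving the writer the same way, assuming the vendored estargz API of this release (NewWriterWithCompressor, NewGzipCompressor, and the exported MinChunkSize field shown in this diff):

package main

import (
  "archive/tar"
  "bytes"
  "fmt"

  "github.com/containerd/stargz-snapshotter/estargz"
)

func main() {
  // Build a tiny tar stream with two small files.
  var tarBuf bytes.Buffer
  tw := tar.NewWriter(&tarBuf)
  for _, f := range []struct{ name, data string }{{"foo2", "bb"}, {"foo22", "ccc"}} {
    if err := tw.WriteHeader(&tar.Header{Name: f.name, Mode: 0644, Size: int64(len(f.data))}); err != nil {
      panic(err)
    }
    if _, err := tw.Write([]byte(f.data)); err != nil {
      panic(err)
    }
  }
  if err := tw.Close(); err != nil {
    panic(err)
  }

  var blob bytes.Buffer
  w := estargz.NewWriterWithCompressor(&blob, estargz.NewGzipCompressor())
  w.MinChunkSize = 8000 // payloads smaller than this are packed into a shared gzip stream
  if err := w.AppendTar(bytes.NewReader(tarBuf.Bytes())); err != nil {
    panic(err)
  }
  tocDgst, err := w.Close()
  if err != nil {
    panic(err)
  }
  fmt.Println("eStargz size:", blob.Len(), "TOC digest:", tocDgst)
}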
@@ -1354,7 +1496,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {

              if lossless {
                // Check if the result blob reserves original tar metadata
-               rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
+               rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl1)
                if err != nil {
                  t.Errorf("failed to decompress blob: %v", err)
                  return
@@ -1373,32 +1515,71 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
              }

              diffID := w.DiffID()
-             wantDiffID := cl.DiffIDOf(t, b)
+             wantDiffID := cl1.DiffIDOf(t, b)
              if diffID != wantDiffID {
                t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
              }

-             got := cl.CountStreams(t, b)
-             wantNumGz := tt.wantNumGz
-             if lossless && tt.wantNumGzLossLess > 0 {
-               wantNumGz = tt.wantNumGzLossLess
-             }
-             if got != wantNumGz {
-               t.Errorf("number of streams = %d; want %d", got, wantNumGz)
-             }
-
              telemetry, checkCalled := newCalledTelemetry()
+             sr := io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b)))
              r, err := Open(
-               io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
-               WithDecompressors(cl),
+               sr,
+               WithDecompressors(cl1),
                WithTelemetry(telemetry),
              )
              if err != nil {
                t.Fatalf("stargz.Open: %v", err)
              }
-             if err := checkCalled(); err != nil {
+             wantTOCVersion := 1
+             if tt.wantTOCVersion > 0 {
+               wantTOCVersion = tt.wantTOCVersion
+             }
+             if r.toc.Version != wantTOCVersion {
+               t.Fatalf("invalid TOC Version %d; wanted %d", r.toc.Version, wantTOCVersion)
+             }
+
+             footerSize := cl1.FooterSize()
+             footerOffset := sr.Size() - footerSize
+             footer := make([]byte, footerSize)
+             if _, err := sr.ReadAt(footer, footerOffset); err != nil {
+               t.Errorf("failed to read footer: %v", err)
+             }
+             _, tocOffset, _, err := cl1.ParseFooter(footer)
+             if err != nil {
+               t.Errorf("failed to parse footer: %v", err)
+             }
+             if err := checkCalled(tocOffset >= 0); err != nil {
                t.Errorf("telemetry failure: %v", err)
              }
+
+             wantNumGz := tt.wantNumGz
+             if lossless && tt.wantNumGzLossLess > 0 {
+               wantNumGz = tt.wantNumGzLossLess
+             }
+             streamOffsets := []int64{0}
+             prevOffset := int64(-1)
+             streams := 0
+             for _, e := range r.toc.Entries {
+               if e.Offset > prevOffset {
+                 streamOffsets = append(streamOffsets, e.Offset)
+                 prevOffset = e.Offset
+                 streams++
+               }
+             }
+             streams++ // TOC
+             if tocOffset >= 0 {
+               // toc is in the blob
+               streamOffsets = append(streamOffsets, tocOffset)
+             }
+             streams++ // footer
+             streamOffsets = append(streamOffsets, footerOffset)
+             if streams != wantNumGz {
+               t.Errorf("number of streams in TOC = %d; want %d", streams, wantNumGz)
+             }
+
+             t.Logf("testing streams: %+v", streamOffsets)
+             cl1.TestStreams(t, b, streamOffsets)
+
              for _, want := range tt.want {
                want.check(t, r)
              }
@@ -1410,7 +1591,12 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
  }
 }

-func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
+type chunkInfo struct {
+  name string
+  data string
+}
+
+func newCalledTelemetry() (telemetry *Telemetry, check func(needsGetTOC bool) error) {
  var getFooterLatencyCalled bool
  var getTocLatencyCalled bool
  var deserializeTocLatencyCalled bool
@@ -1418,13 +1604,15 @@ func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
    func(time.Time) { getFooterLatencyCalled = true },
    func(time.Time) { getTocLatencyCalled = true },
    func(time.Time) { deserializeTocLatencyCalled = true },
- }, func() error {
+ }, func(needsGetTOC bool) error {
    var allErr []error
    if !getFooterLatencyCalled {
      allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
    }
-   if !getTocLatencyCalled {
-     allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
+   if needsGetTOC {
+     if !getTocLatencyCalled {
+       allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called"))
+     }
    }
    if !deserializeTocLatencyCalled {
      allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called"))
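The new needsGetTOC flag exists because a blob whose TOC lives outside the image (tocOffset < 0) legitimately never fires the GetTocLatency hook. The telemetry itself, as the constructor above suggests, is just three func(time.Time) hooks, one per phase, each handed the phase's start time. A stand-alone sketch of that hook shape (logLatency and measureHook are illustrative names, not part of the package):

package main

import (
  "fmt"
  "time"
)

type measureHook func(start time.Time)

func logLatency(phase string) measureHook {
  return func(start time.Time) {
    fmt.Printf("%s took %v\n", phase, time.Since(start))
  }
}

func main() {
  getFooter := logLatency("get footer")
  start := time.Now()
  time.Sleep(5 * time.Millisecond) // stand-in for reading the footer
  getFooter(start)
}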
@@ -1561,6 +1749,53 @@ func hasFileDigest(file string, digest string) stargzCheck {
  })
 }

+func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
+  return stargzCheckFn(func(t *testing.T, r *Reader) {
+    extraMap := make(map[string]chunkInfo)
+    for _, e := range extra {
+      extraMap[e.name] = e
+    }
+    var extraNames []string
+    for n := range extraMap {
+      extraNames = append(extraNames, n)
+    }
+    f, err := r.OpenFileWithPreReader(file, func(e *TOCEntry, cr io.Reader) error {
+      t.Logf("On %q: got preread of %q", file, e.Name)
+      ex, ok := extraMap[e.Name]
+      if !ok {
+        t.Fatalf("fail on %q: unexpected entry %q: %+v, %+v", file, e.Name, e, extraNames)
+      }
+      got, err := io.ReadAll(cr)
+      if err != nil {
+        t.Fatalf("fail on %q: failed to read %q: %v", file, e.Name, err)
+      }
+      if ex.data != string(got) {
+        t.Fatalf("fail on %q: unexpected contents of %q: len=%d; want=%d", file, e.Name, len(got), len(ex.data))
+      }
+      delete(extraMap, e.Name)
+      return nil
+    })
+    if err != nil {
+      t.Fatal(err)
+    }
+    got := make([]byte, len(want))
+    n, err := f.ReadAt(got, int64(offset))
+    if err != nil {
+      t.Fatalf("ReadAt(len %d, offset %d, size %d) = %v, %v", len(got), offset, f.Size(), n, err)
+    }
+    if string(got) != want {
+      t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
+    }
+    if len(extraMap) != 0 {
+      var exNames []string
+      for _, ex := range extraMap {
+        exNames = append(exNames, ex.name)
+      }
+      t.Fatalf("fail on %q: some entries aren't read: %+v", file, exNames)
+    }
+  })
+}
+
 func hasFileContentsRange(file string, offset int, want string) stargzCheck {
  return stargzCheckFn(func(t *testing.T, r *Reader) {
    f, err := r.OpenFile(file)
@@ -1573,7 +1808,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck {
      t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err)
    }
    if string(got) != want {
-     t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want)
+     t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, viewContent(got), viewContent([]byte(want)))
    }
  })
 }
@@ -1731,6 +1966,67 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
  })
 }

+func mustSameEntry(files ...string) stargzCheck {
+  return stargzCheckFn(func(t *testing.T, r *Reader) {
+    var first *TOCEntry
+    for _, f := range files {
+      if first == nil {
+        var ok bool
+        first, ok = r.Lookup(f)
+        if !ok {
+          t.Errorf("unknown first file on Lookup: %q", f)
+          return
+        }
+      }
+
+      // Test Lookup
+      e, ok := r.Lookup(f)
+      if !ok {
+        t.Errorf("unknown file on Lookup: %q", f)
+        return
+      }
+      if e != first {
+        t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
+        return
+      }
+
+      // Test LookupChild
+      pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
+      if !ok {
+        t.Errorf("failed to get parent of %q", f)
+        return
+      }
+      e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
+      if !ok {
+        t.Errorf("failed to get %q as the child of %+v", f, pe)
+        return
+      }
+      if e != first {
+        t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
+        return
+      }
+
+      // Test ForeachChild
+      pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
+        if baseName == filepath.Base(filepath.Clean(f)) {
+          if e != first {
+            t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
+            return false
+          }
+        }
+        return true
+      })
+    }
+  })
+}
+
+func viewContent(c []byte) string {
+  if len(c) < 100 {
+    return string(c)
+  }
+  return string(c[:50]) + "...(omit)..." + string(c[50:100])
+}
+
 func tarOf(s ...tarEntry) []tarEntry { return s }

 type tarEntry interface {
@@ -1990,6 +2286,16 @@ func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
  })
 }

+var runes = []rune("1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+func randomContents(n int) string {
+  b := make([]rune, n)
+  for i := range b {
+    b[i] = runes[rand.Intn(len(runes))]
+  }
+  return string(b)
+}
+
 func fileModeToTarMode(mode os.FileMode) (int64, error) {
  h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "")
  if err != nil {
@@ -2007,3 +2313,54 @@ func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) }
 func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
 func (f fileInfoOnlyMode) IsDir() bool        { return os.FileMode(f).IsDir() }
 func (f fileInfoOnlyMode) Sys() interface{}   { return nil }

+func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
+  if len(streams) == 0 {
+    return // nop
+  }
+
+  wants := map[int64]struct{}{}
+  for _, s := range streams {
+    wants[s] = struct{}{}
+  }
+
+  len0 := len(b)
+  br := bytes.NewReader(b)
+  zr := new(gzip.Reader)
+  t.Logf("got gzip streams:")
+  numStreams := 0
+  for {
+    zoff := len0 - br.Len()
+    if err := zr.Reset(br); err != nil {
+      if err == io.EOF {
+        return
+      }
+      t.Fatalf("countStreams(gzip), Reset: %v", err)
+    }
+    zr.Multistream(false)
+    n, err := io.Copy(io.Discard, zr)
+    if err != nil {
+      t.Fatalf("countStreams(gzip), Copy: %v", err)
+    }
+    var extra string
+    if len(zr.Header.Extra) > 0 {
+      extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
+    }
+    t.Logf("  [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
+    delete(wants, int64(zoff))
+    numStreams++
+  }
+}
+
+func GzipDiffIDOf(t *testing.T, b []byte) string {
+  h := sha256.New()
+  zr, err := gzip.NewReader(bytes.NewReader(b))
+  if err != nil {
+    t.Fatalf("diffIDOf(gzip): %v", err)
+  }
+  defer zr.Close()
+  if _, err := io.Copy(h, zr); err != nil {
+    t.Fatalf("diffIDOf(gzip).Copy: %v", err)
+  }
+  return fmt.Sprintf("sha256:%x", h.Sum(nil))
+}
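CheckGzipHasStreams works because zr.Multistream(false) makes *gzip.Reader stop at each gzip member boundary instead of transparently concatenating members, so every Reset lands on the next stream and len0 - br.Len() is that stream's byte offset in the blob. A hedged sketch of a call site, assuming the same package context (exampleGzipController is a hypothetical TestingController, not part of this diff):

type exampleGzipController struct{}

// TestStreams can simply delegate to the helper above; streams is typically
// []int64{0, tocOffset, footerOffset}, as collected in the testWriteAndOpen loop.
func (exampleGzipController) TestStreams(t *testing.T, b []byte, streams []int64) {
  CheckGzipHasStreams(t, b, streams)
}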
34 vendor/github.com/containerd/stargz-snapshotter/estargz/types.go generated vendored
@@ -149,6 +149,12 @@ type TOCEntry struct {
  // ChunkSize.
  Offset int64 `json:"offset,omitempty"`

+ // InnerOffset is an optional field indicates uncompressed offset
+ // of this "reg" or "chunk" payload in a stream starts from Offset.
+ // This field enables to put multiple "reg" or "chunk" payloads
+ // in one chunk with having the same Offset but different InnerOffset.
+ InnerOffset int64 `json:"innerOffset,omitempty"`
+
  nextOffset int64 // the Offset of the next entry with a non-zero Offset

  // DevMajor is the major device number for "char" and "block" types.
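InnerOffset is what makes min-chunk-size packing representable in the TOC: two entries can share one Offset (the start of the compressed stream) and differ only in where their payload begins inside that stream after decompression. A small illustration of the arithmetic, assuming the whole shared stream has already been decompressed (the entry type below is a pared-down stand-in for TOCEntry):

package main

import "fmt"

type entry struct {
  Offset      int64 // start of the shared compressed stream within the blob
  InnerOffset int64 // position of this payload inside the decompressed stream
  Size        int64
}

func payload(decompressedStream []byte, e entry) []byte {
  return decompressedStream[e.InnerOffset : e.InnerOffset+e.Size]
}

func main() {
  // Both files live in the stream that starts at blob offset 4096.
  stream := []byte("bbccc") // decompressed contents of that stream
  foo2 := entry{Offset: 4096, InnerOffset: 0, Size: 2}
  foo22 := entry{Offset: 4096, InnerOffset: 2, Size: 3}
  fmt.Printf("%s %s\n", payload(stream, foo2), payload(stream, foo22)) // bb ccc
}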
@@ -159,7 +165,8 @@ type TOCEntry struct {

  // NumLink is the number of entry names pointing to this entry.
  // Zero means one name references this entry.
- NumLink int
+ // This field is calculated during runtime and not recorded in TOC JSON.
+ NumLink int `json:"-"`

  // Xattrs are the extended attribute for the entry.
  Xattrs map[string][]byte `json:"xattrs,omitempty"`
@@ -185,6 +192,9 @@ type TOCEntry struct {
  ChunkDigest string `json:"chunkDigest,omitempty"`

  children map[string]*TOCEntry
+
+ // chunkTopIndex is index of the entry where Offset starts in the blob.
+ chunkTopIndex int
 }

 // ModTime returns the entry's modification time.
@@ -278,7 +288,10 @@ type Compressor interface {
  // Writer returns WriteCloser to be used for writing a chunk to eStargz.
  // Everytime a chunk is written, the WriteCloser is closed and Writer is
  // called again for writing the next chunk.
- Writer(w io.Writer) (io.WriteCloser, error)
+ //
+ // The returned writer should implement "Flush() error" function that flushes
+ // any pending compressed data to the underlying writer.
+ Writer(w io.Writer) (WriteFlushCloser, error)

  // WriteTOCAndFooter is called to write JTOC to the passed Writer.
  // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob
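*gzip.Writer already satisfies the widened return type: it has Write, Close, and a Flush() error that forces any pending compressed data out to the underlying writer. A minimal sketch of a chunk writer in that shape (the local interface mirrors the WriteFlushCloser type introduced at the end of this file):

package main

import (
  "compress/gzip"
  "io"
  "os"
)

// Local mirror of estargz.WriteFlushCloser, for a self-contained example.
type writeFlushCloser interface {
  io.WriteCloser
  Flush() error
}

func newChunkWriter(w io.Writer) (writeFlushCloser, error) {
  return gzip.NewWriterLevel(w, gzip.BestCompression)
}

func main() {
  zw, err := newChunkWriter(os.Stdout)
  if err != nil {
    panic(err)
  }
  zw.Write([]byte("chunk payload"))
  zw.Flush() // pending compressed bytes reach os.Stdout here
  zw.Close() // closes this gzip member; the next chunk gets a fresh writer
}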
@@ -302,8 +315,12 @@ type Decompressor interface {
  // payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
  // the top until the TOC JSON).
  //
- // Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
- // from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
+ // If tocOffset < 0, we assume that TOC isn't contained in the blob and pass nil reader
+ // to ParseTOC. We expect that ParseTOC acquire TOC from the external location and return it.
+ //
+ // tocSize is optional. If tocSize <= 0, it's by default the size of the range from tocOffset until the beginning of the
+ // footer (blob size - tocOff - FooterSize).
+ // If blobPayloadSize < 0, blobPayloadSize become the blob size.
  ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)

  // ParseTOC parses TOC from the passed reader. The reader provides the partial contents
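The defaulting rules in this comment reduce to layout arithmetic over a blob shaped [payload | TOC | footer]. A sketch that applies them exactly as documented (function and parameter names are illustrative; the footer size of 51 in the demo is only an example value):

package main

import "fmt"

// effectiveTOCRange applies the documented defaults: tocOffset < 0 means the
// TOC is external; tocSize <= 0 defaults to blobSize - tocOffset - footerSize;
// blobPayloadSize < 0 defaults to the whole blob size.
func effectiveTOCRange(blobSize, blobPayloadSize, tocOffset, tocSize, footerSize int64) (payload, off, size int64, external bool) {
  if tocOffset < 0 {
    return blobPayloadSize, -1, 0, true // ParseTOC will receive a nil reader
  }
  if blobPayloadSize < 0 {
    blobPayloadSize = blobSize
  }
  if tocSize <= 0 {
    tocSize = blobSize - tocOffset - footerSize
  }
  return blobPayloadSize, tocOffset, tocSize, false
}

func main() {
  p, off, sz, ext := effectiveTOCRange(10000, -1, 9000, 0, 51)
  fmt.Println(p, off, sz, ext) // 10000 9000 949 false
}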
@@ -312,5 +329,14 @@ type Decompressor interface {
  // This function returns tocDgst that represents the digest of TOC that will be used
  // to verify this blob. This must match to the value returned from
  // Compressor.WriteTOCAndFooter that is used when creating this blob.
+ //
+ // If tocOffset returned by ParseFooter is < 0, we assume that TOC isn't contained in the blob.
+ // Pass nil reader to ParseTOC then we expect that ParseTOC acquire TOC from the external location
+ // and return it.
  ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
 }
+
+type WriteFlushCloser interface {
+ io.WriteCloser
+ Flush() error
+}
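The nil-reader contract gives a Decompressor room to fetch a skippable or external TOC from somewhere else entirely. A sketch of the dispatch, assuming the surrounding estargz package context for JTOC, digest.Digest, and io (fetchExternal and decode are hypothetical callbacks, not part of the package):

// parseTOCMaybeExternal shows the contract: a nil reader means ParseFooter
// reported tocOffset < 0, so the TOC must come from an external location.
func parseTOCMaybeExternal(
  r io.Reader,
  decode func(io.Reader) (*JTOC, digest.Digest, error),
  fetchExternal func() (*JTOC, digest.Digest, error),
) (*JTOC, digest.Digest, error) {
  if r == nil {
    return fetchExternal() // TOC is not contained in the blob
  }
  return decode(r)
}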
13 vendor/github.com/in-toto/in-toto-golang/LICENSE generated vendored Normal file
@@ -0,0 +1,13 @@
Copyright 2018 New York University

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
156 vendor/github.com/in-toto/in-toto-golang/in_toto/certconstraint.go generated vendored Normal file
@@ -0,0 +1,156 @@
package in_toto

import (
  "crypto/x509"
  "fmt"
  "net/url"
)

const (
  AllowAllConstraint = "*"
)

// CertificateConstraint defines the attributes a certificate must have to act as a functionary.
// A wildcard `*` allows any value in the specified attribute, where as an empty array or value
// asserts that the certificate must have nothing for that attribute. A certificate must have
// every value defined in a constraint to match.
type CertificateConstraint struct {
  CommonName    string   `json:"common_name"`
  DNSNames      []string `json:"dns_names"`
  Emails        []string `json:"emails"`
  Organizations []string `json:"organizations"`
  Roots         []string `json:"roots"`
  URIs          []string `json:"uris"`
}

// checkResult is a data structure used to hold
// certificate constraint errors
type checkResult struct {
  errors []error
}

// newCheckResult initializes a new checkResult
func newCheckResult() *checkResult {
  return &checkResult{
    errors: make([]error, 0),
  }
}

// evaluate runs a constraint check on a certificate
func (cr *checkResult) evaluate(cert *x509.Certificate, constraintCheck func(*x509.Certificate) error) *checkResult {
  err := constraintCheck(cert)
  if err != nil {
    cr.errors = append(cr.errors, err)
  }
  return cr
}

// error reduces all of the errors into one error with a
// combined error message. If there are no errors, nil
// will be returned.
func (cr *checkResult) error() error {
  if len(cr.errors) == 0 {
    return nil
  }
  return fmt.Errorf("cert failed constraints check: %+q", cr.errors)
}

// Check tests the provided certificate against the constraint. An error is returned if the certificate
// fails any of the constraints. nil is returned if the certificate passes all of the constraints.
func (cc CertificateConstraint) Check(cert *x509.Certificate, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error {
  return newCheckResult().
    evaluate(cert, cc.checkCommonName).
    evaluate(cert, cc.checkDNSNames).
    evaluate(cert, cc.checkEmails).
    evaluate(cert, cc.checkOrganizations).
    evaluate(cert, cc.checkRoots(rootCAIDs, rootCertPool, intermediateCertPool)).
    evaluate(cert, cc.checkURIs).
    error()
}

// checkCommonName verifies that the certificate's common name matches the constraint.
func (cc CertificateConstraint) checkCommonName(cert *x509.Certificate) error {
  return checkCertConstraint("common name", []string{cc.CommonName}, []string{cert.Subject.CommonName})
}

// checkDNSNames verifies that the certificate's dns names matches the constraint.
func (cc CertificateConstraint) checkDNSNames(cert *x509.Certificate) error {
  return checkCertConstraint("dns name", cc.DNSNames, cert.DNSNames)
}

// checkEmails verifies that the certificate's emails matches the constraint.
func (cc CertificateConstraint) checkEmails(cert *x509.Certificate) error {
  return checkCertConstraint("email", cc.Emails, cert.EmailAddresses)
}

// checkOrganizations verifies that the certificate's organizations matches the constraint.
func (cc CertificateConstraint) checkOrganizations(cert *x509.Certificate) error {
  return checkCertConstraint("organization", cc.Organizations, cert.Subject.Organization)
}

// checkRoots verifies that the certificate's roots matches the constraint.
// The certificates trust chain must also be verified.
func (cc CertificateConstraint) checkRoots(rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) func(*x509.Certificate) error {
  return func(cert *x509.Certificate) error {
    _, err := VerifyCertificateTrust(cert, rootCertPool, intermediateCertPool)
    if err != nil {
      return fmt.Errorf("failed to verify roots: %w", err)
    }
    return checkCertConstraint("root", cc.Roots, rootCAIDs)
  }
}

// checkURIs verifies that the certificate's URIs matches the constraint.
func (cc CertificateConstraint) checkURIs(cert *x509.Certificate) error {
  return checkCertConstraint("uri", cc.URIs, urisToStrings(cert.URIs))
}

// urisToStrings is a helper that converts a list of URL objects to the string that represents them
func urisToStrings(uris []*url.URL) []string {
  res := make([]string, 0, len(uris))
  for _, uri := range uris {
    res = append(res, uri.String())
  }

  return res
}

// checkCertConstraint tests that the provided test values match the allowed values of the constraint.
// All allowed values must be met one-to-one to be considered a successful match.
func checkCertConstraint(attributeName string, constraints, values []string) error {
  // If the only constraint is to allow all, the check succeeds
  if len(constraints) == 1 && constraints[0] == AllowAllConstraint {
    return nil
  }

  if len(constraints) == 1 && constraints[0] == "" {
    constraints = []string{}
  }

  if len(values) == 1 && values[0] == "" {
    values = []string{}
  }

  // If no constraints are specified, but the certificate has values for the attribute, then the check fails
  if len(constraints) == 0 && len(values) > 0 {
    return fmt.Errorf("not expecting any %s(s), but cert has %d %s(s)", attributeName, len(values), attributeName)
  }

  unmet := NewSet(constraints...)
  for _, v := range values {
    // if the cert has a value we didn't expect, fail early
    if !unmet.Has(v) {
      return fmt.Errorf("cert has an unexpected %s %s given constraints %+q", attributeName, v, constraints)
    }

    // consider the constraint met
    unmet.Remove(v)
  }

  // if we have any unmet left after going through each test value, fail.
  if len(unmet) > 0 {
    return fmt.Errorf("cert with %s(s) %+q did not pass all constraints %+q", attributeName, values, constraints)
  }

  return nil
}
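A hedged usage sketch of this constraint type, built only on the fields and the Check signature shown above. The self-signed certificate and the root ID are illustrative; note that Check also verifies the trust chain, so a failure printed here may come from chain verification rather than from the attribute constraints:

package main

import (
  "crypto/ecdsa"
  "crypto/elliptic"
  "crypto/rand"
  "crypto/x509"
  "crypto/x509/pkix"
  "fmt"
  "math/big"

  in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
  // Self-signed certificate, just to have something to check.
  key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
  if err != nil {
    panic(err)
  }
  tmpl := &x509.Certificate{
    SerialNumber:   big.NewInt(1),
    Subject:        pkix.Name{CommonName: "builder"},
    EmailAddresses: []string{"dev@example.com"},
  }
  der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
  if err != nil {
    panic(err)
  }
  cert, err := x509.ParseCertificate(der)
  if err != nil {
    panic(err)
  }

  // "*" allows anything; the e-mail list must be matched one-to-one.
  cc := in_toto.CertificateConstraint{
    CommonName:    "*",
    DNSNames:      []string{"*"},
    Emails:        []string{"dev@example.com"},
    Organizations: []string{"*"},
    Roots:         []string{"*"},
    URIs:          []string{"*"},
  }

  roots := x509.NewCertPool()
  roots.AddCert(cert)
  fmt.Println(cc.Check(cert, []string{"example-root-id"}, roots, x509.NewCertPool()))
}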
30 vendor/github.com/in-toto/in-toto-golang/in_toto/hashlib.go generated vendored Normal file
@@ -0,0 +1,30 @@
package in_toto

import (
  "crypto/sha256"
  "crypto/sha512"
  "hash"
)

/*
getHashMapping returns a mapping from hash algorithm to supported hash
interface.
*/
func getHashMapping() map[string]func() hash.Hash {
  return map[string]func() hash.Hash{
    "sha256": sha256.New,
    "sha512": sha512.New,
    "sha384": sha512.New384,
  }
}

/*
hashToHex calculates the hash over data based on hash algorithm h.
*/
func hashToHex(h hash.Hash, data []byte) []byte {
  h.Write(data)
  // We need to use h.Sum(nil) here, because otherwise hash.Sum() appends
  // the hash to the passed data. So instead of having only the hash
  // we would get: "dataHASH"
  return h.Sum(nil)
}
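The mapping exists because Go cannot express a constant map or slice, so the package hands out a fresh map from a function instead. A stand-alone sketch of picking an algorithm by name and hex-encoding the digest, mirroring the private helpers above (digestHex and hashers are illustrative names, not part of the package):

package main

import (
  "crypto/sha256"
  "crypto/sha512"
  "encoding/hex"
  "fmt"
  "hash"
)

// Stand-alone mirror of the private mapping above: name -> constructor.
var hashers = map[string]func() hash.Hash{
  "sha256": sha256.New,
  "sha512": sha512.New,
  "sha384": sha512.New384,
}

func digestHex(alg string, data []byte) (string, error) {
  newHash, ok := hashers[alg]
  if !ok {
    return "", fmt.Errorf("unsupported hash algorithm %q", alg)
  }
  h := newHash()
  h.Write(data)
  // h.Sum(nil), not h.Sum(data): Sum appends the digest to its argument.
  return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
  d, err := digestHex("sha256", []byte("in-toto"))
  if err != nil {
    panic(err)
  }
  fmt.Println(d)
}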
670 vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go generated vendored Normal file
@@ -0,0 +1,670 @@
package in_toto

import (
  "crypto"
  "crypto/ecdsa"
  "crypto/ed25519"
  "crypto/rand"
  "crypto/rsa"
  "crypto/sha256"
  "crypto/x509"
  "encoding/hex"
  "encoding/pem"
  "errors"
  "fmt"
  "io"
  "io/ioutil"
  "os"
  "strings"

  "github.com/secure-systems-lab/go-securesystemslib/cjson"
)

// ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails
var ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type")

// ErrNoPEMBlock gets triggered when there is no PEM block in the provided file
var ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)")

// ErrUnsupportedKeyType is returned when we are dealing with a key type different to ed25519 or RSA
var ErrUnsupportedKeyType = errors.New("unsupported key type")

// ErrInvalidSignature is returned when the signature is invalid
var ErrInvalidSignature = errors.New("invalid signature")

// ErrInvalidKey is returned when a given key is none of RSA, ECDSA or ED25519
var ErrInvalidKey = errors.New("invalid key")

const (
  rsaKeyType            string = "rsa"
  ecdsaKeyType          string = "ecdsa"
  ed25519KeyType        string = "ed25519"
  rsassapsssha256Scheme string = "rsassa-pss-sha256"
  ecdsaSha2nistp224     string = "ecdsa-sha2-nistp224"
  ecdsaSha2nistp256     string = "ecdsa-sha2-nistp256"
  ecdsaSha2nistp384     string = "ecdsa-sha2-nistp384"
  ecdsaSha2nistp521     string = "ecdsa-sha2-nistp521"
  ed25519Scheme         string = "ed25519"
  pemPublicKey          string = "PUBLIC KEY"
  pemPrivateKey         string = "PRIVATE KEY"
  pemRSAPrivateKey      string = "RSA PRIVATE KEY"
)

/*
getSupportedKeyIDHashAlgorithms returns a string slice of supported
KeyIDHashAlgorithms. We need to use this function instead of a constant,
because Go does not support global constant slices.
*/
func getSupportedKeyIDHashAlgorithms() Set {
  return NewSet("sha256", "sha512")
}

/*
getSupportedRSASchemes returns a string slice of supported RSA Key schemes.
We need to use this function instead of a constant because Go does not support
global constant slices.
*/
func getSupportedRSASchemes() []string {
  return []string{rsassapsssha256Scheme}
}

/*
getSupportedEcdsaSchemes returns a string slice of supported ecdsa Key schemes.
We need to use this function instead of a constant because Go does not support
global constant slices.
*/
func getSupportedEcdsaSchemes() []string {
  return []string{ecdsaSha2nistp224, ecdsaSha2nistp256, ecdsaSha2nistp384, ecdsaSha2nistp521}
}

/*
getSupportedEd25519Schemes returns a string slice of supported ed25519 Key
schemes. We need to use this function instead of a constant because Go does
not support global constant slices.
*/
func getSupportedEd25519Schemes() []string {
  return []string{ed25519Scheme}
}

/*
generateKeyID creates a partial key map and generates the key ID
based on the created partial key map via the SHA256 method.
The resulting keyID will be directly saved in the corresponding key object.
On success generateKeyID will return nil, in case of errors while encoding
there will be an error.
*/
func (k *Key) generateKeyID() error {
  // Create partial key map used to create the keyid
  // Unfortunately, we can't use the Key object because this also carries
  // yet unwanted fields, such as KeyID and KeyVal.Private and therefore
  // produces a different hash. We generate the keyID exactly as we do in
  // the securesystemslib to keep interoperability between other in-toto
  // implementations.
  var keyToBeHashed = map[string]interface{}{
    "keytype":               k.KeyType,
    "scheme":                k.Scheme,
    "keyid_hash_algorithms": k.KeyIDHashAlgorithms,
    "keyval": map[string]string{
      "public": k.KeyVal.Public,
    },
  }
  keyCanonical, err := cjson.EncodeCanonical(keyToBeHashed)
  if err != nil {
    return err
  }
  // calculate sha256 and return string representation of keyID
  keyHashed := sha256.Sum256(keyCanonical)
  k.KeyID = fmt.Sprintf("%x", keyHashed)
  err = validateKey(*k)
  if err != nil {
    return err
  }
  return nil
}

/*
generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType.
If successful it returns a PEM block as []byte slice. This function should always
succeed, if keyBytes is empty the PEM block will have an empty byte block.
Therefore only header and footer will exist.
*/
func generatePEMBlock(keyBytes []byte, pemType string) []byte {
  // construct PEM block
  pemBlock := &pem.Block{
    Type:    pemType,
    Headers: nil,
    Bytes:   keyBytes,
  }
  return pem.EncodeToMemory(pemBlock)
}

/*
setKeyComponents sets all components in our key object.
Furthermore it makes sure to remove any trailing and leading whitespaces or newlines.
We treat key types differently for interoperability reasons to the in-toto python
implementation and the securesystemslib.
*/
func (k *Key) setKeyComponents(pubKeyBytes []byte, privateKeyBytes []byte, keyType string, scheme string, KeyIDHashAlgorithms []string) error {
  // assume we have a privateKey if the key size is bigger than 0

  switch keyType {
  case rsaKeyType:
    if len(privateKeyBytes) > 0 {
      k.KeyVal = KeyVal{
        Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemRSAPrivateKey))),
        Public:  strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
      }
    } else {
      k.KeyVal = KeyVal{
        Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
      }
    }
  case ecdsaKeyType:
    if len(privateKeyBytes) > 0 {
      k.KeyVal = KeyVal{
        Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemPrivateKey))),
        Public:  strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
      }
    } else {
      k.KeyVal = KeyVal{
        Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
      }
    }
  case ed25519KeyType:
    if len(privateKeyBytes) > 0 {
      k.KeyVal = KeyVal{
        Private: strings.TrimSpace(hex.EncodeToString(privateKeyBytes)),
        Public:  strings.TrimSpace(hex.EncodeToString(pubKeyBytes)),
      }
    } else {
      k.KeyVal = KeyVal{
        Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)),
      }
    }
  default:
    return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, keyType)
  }
  k.KeyType = keyType
  k.Scheme = scheme
  k.KeyIDHashAlgorithms = KeyIDHashAlgorithms
  if err := k.generateKeyID(); err != nil {
    return err
  }
  return nil
}

/*
parseKey tries to parse a PEM []byte slice. Using the following standards
in the given order:

  - PKCS8
  - PKCS1
  - PKIX

On success it returns the parsed key and nil.
On failure it returns nil and the error ErrFailedPEMParsing
*/
func parseKey(data []byte) (interface{}, error) {
  key, err := x509.ParsePKCS8PrivateKey(data)
  if err == nil {
    return key, nil
  }
  key, err = x509.ParsePKCS1PrivateKey(data)
  if err == nil {
    return key, nil
  }
  key, err = x509.ParsePKIXPublicKey(data)
  if err == nil {
    return key, nil
  }
  key, err = x509.ParseCertificate(data)
  if err == nil {
    return key, nil
  }
  key, err = x509.ParseECPrivateKey(data)
  if err == nil {
    return key, nil
  }
  return nil, ErrFailedPEMParsing
}

/*
decodeAndParse receives potential PEM bytes decodes them via pem.Decode
and pushes them to parseKey. If any error occurs during this process,
the function will return nil and an error (either ErrFailedPEMParsing
or ErrNoPEMBlock). On success it will return the decoded pemData, the
key object interface and nil as error. We need the decoded pemData,
because LoadKey relies on decoded pemData for operating system
interoperability.
*/
func decodeAndParse(pemBytes []byte) (*pem.Block, interface{}, error) {
  // pem.Decode returns the parsed pem block and a rest.
  // The rest is everything, that could not be parsed as PEM block.
  // Therefore we can drop this via using the blank identifier "_"
  data, _ := pem.Decode(pemBytes)
  if data == nil {
    return nil, nil, ErrNoPEMBlock
  }

  // Try to load private key, if this fails try to load
  // key as public key
  key, err := parseKey(data.Bytes)
  if err != nil {
    return nil, nil, err
  }
  return data, key, nil
}

/*
LoadKey loads the key file at specified file path into the key object.
It automatically derives the PEM type and the key type.
Right now the following PEM types are supported:

  - PKCS1 for private keys
  - PKCS8 for private keys
  - PKIX for public keys

The following key types are supported and will be automatically assigned to
the key type field:

  - ed25519
  - rsa
  - ecdsa

The following schemes are supported:

  - ed25519 -> ed25519
  - rsa -> rsassa-pss-sha256
  - ecdsa -> ecdsa-sha256-nistp256

Note that, this behavior is consistent with the securesystemslib, except for
ecdsa. We do not use the scheme string as key type in in-toto-golang.
Instead we are going with a ecdsa/ecdsa-sha2-nistp256 pair.

On success it will return nil. The following errors can happen:

  - path not found or not readable
  - no PEM block in the loaded file
  - no valid PKCS8/PKCS1 private key or PKIX public key
  - errors while marshalling
  - unsupported key types
*/
func (k *Key) LoadKey(path string, scheme string, KeyIDHashAlgorithms []string) error {
  pemFile, err := os.Open(path)
  if err != nil {
    return err
  }
  defer pemFile.Close()

  err = k.LoadKeyReader(pemFile, scheme, KeyIDHashAlgorithms)
  if err != nil {
    return err
  }

  return pemFile.Close()
}

func (k *Key) LoadKeyDefaults(path string) error {
  pemFile, err := os.Open(path)
  if err != nil {
    return err
  }
  defer pemFile.Close()

  err = k.LoadKeyReaderDefaults(pemFile)
  if err != nil {
    return err
  }

  return pemFile.Close()
}

// LoadKeyReader loads the key from a supplied reader. The logic matches LoadKey otherwise.
func (k *Key) LoadKeyReader(r io.Reader, scheme string, KeyIDHashAlgorithms []string) error {
  if r == nil {
    return ErrNoPEMBlock
  }
  // Read key bytes
  pemBytes, err := ioutil.ReadAll(r)
  if err != nil {
    return err
  }
  // decodeAndParse returns the pemData for later use
  // and a parsed key object (for operations on that key, like extracting the public Key)
  pemData, key, err := decodeAndParse(pemBytes)
  if err != nil {
    return err
  }

  return k.loadKey(key, pemData, scheme, KeyIDHashAlgorithms)
}

func (k *Key) LoadKeyReaderDefaults(r io.Reader) error {
  if r == nil {
    return ErrNoPEMBlock
  }
  // Read key bytes
  pemBytes, err := ioutil.ReadAll(r)
  if err != nil {
    return err
  }
  // decodeAndParse returns the pemData for later use
  // and a parsed key object (for operations on that key, like extracting the public Key)
  pemData, key, err := decodeAndParse(pemBytes)
  if err != nil {
    return err
  }

  scheme, keyIDHashAlgorithms, err := getDefaultKeyScheme(key)
  if err != nil {
    return err
  }

  return k.loadKey(key, pemData, scheme, keyIDHashAlgorithms)
}

func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms []string, err error) {
  keyIDHashAlgorithms = []string{"sha256", "sha512"}

  switch key.(type) {
  case *rsa.PublicKey, *rsa.PrivateKey:
    scheme = rsassapsssha256Scheme
  case ed25519.PrivateKey, ed25519.PublicKey:
    scheme = ed25519Scheme
  case *ecdsa.PrivateKey, *ecdsa.PublicKey:
    scheme = ecdsaSha2nistp256
  case *x509.Certificate:
    return getDefaultKeyScheme(key.(*x509.Certificate).PublicKey)
  default:
    err = ErrUnsupportedKeyType
  }

  return scheme, keyIDHashAlgorithms, err
}

func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error {

  switch key.(type) {
  case *rsa.PublicKey:
    pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PublicKey))
    if err != nil {
      return err
    }
    if err := k.setKeyComponents(pubKeyBytes, []byte{}, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
      return err
    }
  case *rsa.PrivateKey:
    // Note: RSA Public Keys will get stored as X.509 SubjectPublicKeyInfo (RFC5280)
    // This behavior is consistent to the securesystemslib
    pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PrivateKey).Public())
    if err != nil {
      return err
    }
    if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
      return err
    }
  case ed25519.PublicKey:
    if err := k.setKeyComponents(key.(ed25519.PublicKey), []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil {
      return err
    }
  case ed25519.PrivateKey:
    pubKeyBytes := key.(ed25519.PrivateKey).Public()
    if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key.(ed25519.PrivateKey), ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil {
      return err
    }
  case *ecdsa.PrivateKey:
    pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PrivateKey).Public())
    if err != nil {
      return err
    }
    if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
      return err
    }
  case *ecdsa.PublicKey:
    pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PublicKey))
    if err != nil {
      return err
    }
    if err := k.setKeyComponents(pubKeyBytes, []byte{}, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
      return err
    }
  case *x509.Certificate:
    err := k.loadKey(key.(*x509.Certificate).PublicKey, pemData, scheme, keyIDHashAlgorithms)
    if err != nil {
      return err
    }

    k.KeyVal.Certificate = string(pem.EncodeToMemory(pemData))

  default:
    // We should never get here, because we implement all from Go supported Key Types
    return errors.New("unexpected Error in LoadKey function")
  }

  return nil
}

/*
GenerateSignature will automatically detect the key type and sign the signable data
with the provided key. If everything goes right GenerateSignature will return
a for the key valid signature and err=nil. If something goes wrong it will
return a not initialized signature and an error. Possible errors are:

  - ErrNoPEMBlock
  - ErrUnsupportedKeyType

Currently supported is only one scheme per key.

Note that in-toto-golang has different requirements to an ecdsa key.
In in-toto-golang we use the string 'ecdsa' as string for the key type.
In the key scheme we use: ecdsa-sha2-nistp256.
*/
func GenerateSignature(signable []byte, key Key) (Signature, error) {
  err := validateKey(key)
  if err != nil {
    return Signature{}, err
  }
  var signature Signature
  var signatureBuffer []byte
  hashMapping := getHashMapping()
  // The following switch block is needed for keeping interoperability
  // with the securesystemslib and the python implementation
  // in which we are storing RSA keys in PEM format, but ed25519 keys hex encoded.
  switch key.KeyType {
  case rsaKeyType:
    // We do not need the pemData here, so we can throw it away via '_'
    _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
    if err != nil {
      return Signature{}, err
    }
    parsedKey, ok := parsedKey.(*rsa.PrivateKey)
    if !ok {
      return Signature{}, ErrKeyKeyTypeMismatch
    }
    switch key.Scheme {
    case rsassapsssha256Scheme:
      hashed := hashToHex(hashMapping["sha256"](), signable)
      // We use rand.Reader as secure random source for rsa.SignPSS()
      signatureBuffer, err = rsa.SignPSS(rand.Reader, parsedKey.(*rsa.PrivateKey), crypto.SHA256, hashed,
        &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
      if err != nil {
        return signature, err
      }
    default:
      // supported key schemes will get checked in validateKey
      panic("unexpected Error in GenerateSignature function")
    }
  case ecdsaKeyType:
    // We do not need the pemData here, so we can throw it away via '_'
    _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
    if err != nil {
      return Signature{}, err
    }
    parsedKey, ok := parsedKey.(*ecdsa.PrivateKey)
    if !ok {
      return Signature{}, ErrKeyKeyTypeMismatch
    }
    curveSize := parsedKey.(*ecdsa.PrivateKey).Curve.Params().BitSize
    var hashed []byte
    if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {
      return Signature{}, ErrCurveSizeSchemeMismatch
    }
    // implement https://tools.ietf.org/html/rfc5656#section-6.2.1
    // We determine the curve size and choose the correct hashing
    // method based on the curveSize
    switch {
    case curveSize <= 256:
      hashed = hashToHex(hashMapping["sha256"](), signable)
    case 256 < curveSize && curveSize <= 384:
      hashed = hashToHex(hashMapping["sha384"](), signable)
    case curveSize > 384:
      hashed = hashToHex(hashMapping["sha512"](), signable)
    default:
      panic("unexpected Error in GenerateSignature function")
    }
    // Generate the ecdsa signature on the same way, as we do in the securesystemslib
    // We are marshalling the ecdsaSignature struct as ASN.1 INTEGER SEQUENCES
    // into an ASN.1 Object.
    signatureBuffer, err = ecdsa.SignASN1(rand.Reader, parsedKey.(*ecdsa.PrivateKey), hashed[:])
    if err != nil {
      return signature, err
    }
  case ed25519KeyType:
    // We do not need a scheme switch here, because ed25519
    // only consist of sha256 and curve25519.
    privateHex, err := hex.DecodeString(key.KeyVal.Private)
    if err != nil {
      return signature, ErrInvalidHexString
    }
    // Note: We can directly use the key for signing and do not
    // need to use ed25519.NewKeyFromSeed().
    signatureBuffer = ed25519.Sign(privateHex, signable)
  default:
    // We should never get here, because we call validateKey in the first
    // line of the function.
    panic("unexpected Error in GenerateSignature function")
  }
  signature.Sig = hex.EncodeToString(signatureBuffer)
  signature.KeyID = key.KeyID
  signature.Certificate = key.KeyVal.Certificate
  return signature, nil
}

/*
VerifySignature will verify unverified byte data via a passed key and signature.
Supported key types are:

  - rsa
  - ed25519
  - ecdsa

When encountering an RSA key, VerifySignature will decode the PEM block in the key
and will call rsa.VerifyPSS() for verifying the RSA signature.
When encountering an ed25519 key, VerifySignature will decode the hex string encoded
public key and will use ed25519.Verify() for verifying the ed25519 signature.
When the given key is an ecdsa key, VerifySignature will unmarshall the ASN1 object
and will use the retrieved ecdsa components 'r' and 's' for verifying the signature.
On success it will return nil. In case of an unsupported key type or any other error
it will return an error.

Note that in-toto-golang has different requirements to an ecdsa key.
In in-toto-golang we use the string 'ecdsa' as string for the key type.
In the key scheme we use: ecdsa-sha2-nistp256.
*/
func VerifySignature(key Key, sig Signature, unverified []byte) error {
  err := validateKey(key)
  if err != nil {
    return err
  }
  sigBytes, err := hex.DecodeString(sig.Sig)
  if err != nil {
    return err
  }
  hashMapping := getHashMapping()
  switch key.KeyType {
  case rsaKeyType:
    // We do not need the pemData here, so we can throw it away via '_'
    _, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
    if err != nil {
      return err
    }
    parsedKey, ok := parsedKey.(*rsa.PublicKey)
    if !ok {
      return ErrKeyKeyTypeMismatch
    }
    switch key.Scheme {
    case rsassapsssha256Scheme:
      hashed := hashToHex(hashMapping["sha256"](), unverified)
      err = rsa.VerifyPSS(parsedKey.(*rsa.PublicKey), crypto.SHA256, hashed, sigBytes, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
      if err != nil {
        return fmt.Errorf("%w: %s", ErrInvalidSignature, err)
      }
    default:
      // supported key schemes will get checked in validateKey
||||
panic("unexpected Error in VerifySignature function")
|
||||
}
|
||||
case ecdsaKeyType:
|
||||
// We do not need the pemData here, so we can throw it away via '_'
|
||||
_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
parsedKey, ok := parsedKey.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
return ErrKeyKeyTypeMismatch
|
||||
}
|
||||
curveSize := parsedKey.(*ecdsa.PublicKey).Curve.Params().BitSize
|
||||
var hashed []byte
|
||||
if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {
|
||||
return ErrCurveSizeSchemeMismatch
|
||||
}
|
||||
// implement https://tools.ietf.org/html/rfc5656#section-6.2.1
|
||||
// We determine the curve size and choose the correct hashing
|
||||
// method based on the curveSize
|
||||
switch {
|
||||
case curveSize <= 256:
|
||||
hashed = hashToHex(hashMapping["sha256"](), unverified)
|
||||
case 256 < curveSize && curveSize <= 384:
|
||||
hashed = hashToHex(hashMapping["sha384"](), unverified)
|
||||
case curveSize > 384:
|
||||
hashed = hashToHex(hashMapping["sha512"](), unverified)
|
||||
default:
|
||||
panic("unexpected Error in VerifySignature function")
|
||||
}
|
||||
if ok := ecdsa.VerifyASN1(parsedKey.(*ecdsa.PublicKey), hashed[:], sigBytes); !ok {
|
||||
return ErrInvalidSignature
|
||||
}
|
||||
case ed25519KeyType:
|
||||
// We do not need a scheme switch here, because ed25519
|
||||
// only consist of sha256 and curve25519.
|
||||
pubHex, err := hex.DecodeString(key.KeyVal.Public)
|
||||
if err != nil {
|
||||
return ErrInvalidHexString
|
||||
}
|
||||
if ok := ed25519.Verify(pubHex, unverified, sigBytes); !ok {
|
||||
return fmt.Errorf("%w: ed25519", ErrInvalidSignature)
|
||||
}
|
||||
default:
|
||||
// We should never get here, because we call validateKey in the first
|
||||
// line of the function.
|
||||
panic("unexpected Error in VerifySignature function")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
/*
|
||||
VerifyCertificateTrust verifies that the certificate has a chain of trust
|
||||
to a root in rootCertPool, possibly using any intermediates in
|
||||
intermediateCertPool
|
||||
*/
|
||||
func VerifyCertificateTrust(cert *x509.Certificate, rootCertPool, intermediateCertPool *x509.CertPool) ([][]*x509.Certificate, error) {
|
||||
verifyOptions := x509.VerifyOptions{
|
||||
Roots: rootCertPool,
|
||||
Intermediates: intermediateCertPool,
|
||||
}
|
||||
chains, err := cert.Verify(verifyOptions)
|
||||
if len(chains) == 0 || err != nil {
|
||||
return nil, fmt.Errorf("cert cannot be verified by provided roots and intermediates")
|
||||
}
|
||||
return chains, nil
|
||||
}
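
As a usage illustration, here is a minimal, self-contained sketch of the sign/verify round trip above with an ed25519 key. The KeyID value is a placeholder (not the canonical securesystemslib keyid), and the exact set of fields validateKey requires is assumed from the code above:

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/hex"
	"log"

	in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	// Generate a throwaway ed25519 key pair and wrap it in the Key struct
	// that GenerateSignature/VerifySignature expect (hex-encoded material).
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	key := in_toto.Key{
		KeyID:               "2f89b9272acfc8f4a0a0f094d789fdb0ba798b0fe41f2f5cb84c4d74203c4b60", // placeholder ID
		KeyType:             "ed25519",
		Scheme:              "ed25519",
		KeyIDHashAlgorithms: []string{"sha256"},
		KeyVal: in_toto.KeyVal{
			Private: hex.EncodeToString(priv),
			Public:  hex.EncodeToString(pub),
		},
	}

	payload := []byte(`{"_type":"link"}`)
	sig, err := in_toto.GenerateSignature(payload, key) // hex-encodes the raw signature
	if err != nil {
		log.Fatal(err)
	}
	// VerifySignature re-checks the payload against the public half of the key.
	if err := in_toto.VerifySignature(key, sig, payload); err != nil {
		log.Fatal(err)
	}
	log.Println("signature verified")
}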
227 vendor/github.com/in-toto/in-toto-golang/in_toto/match.go generated vendored Normal file
@@ -0,0 +1,227 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found at https://golang.org/LICENSE.

// this is a modified version of path.Match that removes handling of path separators

package in_toto

import (
	"errors"
	"unicode/utf8"
)

// errBadPattern indicates a pattern was malformed.
var errBadPattern = errors.New("syntax error in pattern")

// match reports whether name matches the shell pattern.
// The pattern syntax is:
//
//	pattern:
//		{ term }
//	term:
//		'*'         matches any sequence of characters
//		'?'         matches any single character
//		'[' [ '^' ] { character-range } ']'
//		            character class (must be non-empty)
//		c           matches character c (c != '*', '?', '\\', '[')
//		'\\' c      matches character c
//
//	character-range:
//		c           matches character c (c != '\\', '-', ']')
//		'\\' c      matches character c
//		lo '-' hi   matches character c for lo <= c <= hi
//
// match requires pattern to match all of name, not just a substring.
// The only possible returned error is errBadPattern, when pattern
// is malformed.
func match(pattern, name string) (matched bool, err error) {
Pattern:
	for len(pattern) > 0 {
		var star bool
		var chunk string
		star, chunk, pattern = scanChunk(pattern)
		if star && chunk == "" {
			// Trailing * matches everything
			return true, nil
		}
		// Look for match at current position.
		t, ok, err := matchChunk(chunk, name)
		// if we're the last chunk, make sure we've exhausted the name
		// otherwise we'll give a false result even if we could still match
		// using the star
		if ok && (len(t) == 0 || len(pattern) > 0) {
			name = t
			continue
		}
		if err != nil {
			return false, err
		}
		if star {
			// Look for match skipping i+1 bytes.
			for i := 0; i < len(name); i++ {
				t, ok, err := matchChunk(chunk, name[i+1:])
				if ok {
					// if we're the last chunk, make sure we exhausted the name
					if len(pattern) == 0 && len(t) > 0 {
						continue
					}
					name = t
					continue Pattern
				}
				if err != nil {
					return false, err
				}
			}
		}
		// Before returning false with no error,
		// check that the remainder of the pattern is syntactically valid.
		for len(pattern) > 0 {
			_, chunk, pattern = scanChunk(pattern)
			if _, _, err := matchChunk(chunk, ""); err != nil {
				return false, err
			}
		}
		return false, nil
	}
	return len(name) == 0, nil
}

// scanChunk gets the next segment of pattern, which is a non-star string
// possibly preceded by a star.
func scanChunk(pattern string) (star bool, chunk, rest string) {
	for len(pattern) > 0 && pattern[0] == '*' {
		pattern = pattern[1:]
		star = true
	}
	inrange := false
	var i int
Scan:
	for i = 0; i < len(pattern); i++ {
		switch pattern[i] {
		case '\\':
			// error check handled in matchChunk: bad pattern.
			if i+1 < len(pattern) {
				i++
			}
		case '[':
			inrange = true
		case ']':
			inrange = false
		case '*':
			if !inrange {
				break Scan
			}
		}
	}
	return star, pattern[0:i], pattern[i:]
}

// matchChunk checks whether chunk matches the beginning of s.
// If so, it returns the remainder of s (after the match).
// Chunk is all single-character operators: literals, char classes, and ?.
func matchChunk(chunk, s string) (rest string, ok bool, err error) {
	// failed records whether the match has failed.
	// After the match fails, the loop continues on processing chunk,
	// checking that the pattern is well-formed but no longer reading s.
	failed := false
	for len(chunk) > 0 {
		if !failed && len(s) == 0 {
			failed = true
		}
		switch chunk[0] {
		case '[':
			// character class
			var r rune
			if !failed {
				var n int
				r, n = utf8.DecodeRuneInString(s)
				s = s[n:]
			}
			chunk = chunk[1:]
			// possibly negated
			negated := false
			if len(chunk) > 0 && chunk[0] == '^' {
				negated = true
				chunk = chunk[1:]
			}
			// parse all ranges
			match := false
			nrange := 0
			for {
				if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
					chunk = chunk[1:]
					break
				}
				var lo, hi rune
				if lo, chunk, err = getEsc(chunk); err != nil {
					return "", false, err
				}
				hi = lo
				if chunk[0] == '-' {
					if hi, chunk, err = getEsc(chunk[1:]); err != nil {
						return "", false, err
					}
				}
				if lo <= r && r <= hi {
					match = true
				}
				nrange++
			}
			if match == negated {
				failed = true
			}

		case '?':
			if !failed {
				_, n := utf8.DecodeRuneInString(s)
				s = s[n:]
			}
			chunk = chunk[1:]

		case '\\':
			chunk = chunk[1:]
			if len(chunk) == 0 {
				return "", false, errBadPattern
			}
			fallthrough

		default:
			if !failed {
				if chunk[0] != s[0] {
					failed = true
				}
				s = s[1:]
			}
			chunk = chunk[1:]
		}
	}
	if failed {
		return "", false, nil
	}
	return s, true, nil
}

// getEsc gets a possibly-escaped character from chunk, for a character class.
func getEsc(chunk string) (r rune, nchunk string, err error) {
	if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
		err = errBadPattern
		return
	}
	if chunk[0] == '\\' {
		chunk = chunk[1:]
		if len(chunk) == 0 {
			err = errBadPattern
			return
		}
	}
	r, n := utf8.DecodeRuneInString(chunk)
	if r == utf8.RuneError && n == 1 {
		err = errBadPattern
	}
	nchunk = chunk[n:]
	if len(nchunk) == 0 {
		err = errBadPattern
	}
	return
}
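
Since match is unexported, any usage sketch has to live inside package in_toto. A hypothetical test illustrating the one behavioral difference from path.Match noted above (path separators are not special, so '*' crosses '/'):

package in_toto

import "testing"

// Illustrative only: in stock path.Match this pattern would NOT match,
// because '*' stops at '/'; in this modified matcher it does.
func TestMatchIgnoresSeparators(t *testing.T) {
	ok, err := match("foo/*.tar.gz", "foo/bar/baz.tar.gz")
	if err != nil {
		t.Fatal(err)
	}
	if !ok {
		t.Fatal("expected '*' to match across '/' in this modified matcher")
	}
}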
1073 vendor/github.com/in-toto/in-toto-golang/in_toto/model.go generated vendored Normal file
File diff suppressed because it is too large
131 vendor/github.com/in-toto/in-toto-golang/in_toto/rulelib.go generated vendored Normal file
@@ -0,0 +1,131 @@
package in_toto

import (
	"fmt"
	"strings"
)

// An error message issued in UnpackRule if it receives a malformed rule.
var errorMsg = "Wrong rule format, available formats are:\n" +
	"\tMATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)" +
	" [IN <destination-path-prefix>] FROM <step>,\n" +
	"\tCREATE <pattern>,\n" +
	"\tDELETE <pattern>,\n" +
	"\tMODIFY <pattern>,\n" +
	"\tALLOW <pattern>,\n" +
	"\tDISALLOW <pattern>,\n" +
	"\tREQUIRE <filename>\n\n"

/*
UnpackRule parses the passed rule and extracts and returns the information
required for rule processing. It can be used to verify if a rule has a valid
format. Available rule formats are:

	MATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)
		[IN <destination-path-prefix>] FROM <step>,
	CREATE <pattern>,
	DELETE <pattern>,
	MODIFY <pattern>,
	ALLOW <pattern>,
	DISALLOW <pattern>

Rule tokens are normalized to lower case before returning. The returned map
has the following format:

	{
		"type": "match" | "create" | "delete" | "modify" | "allow" | "disallow",
		"pattern": "<file name pattern>",
		"srcPrefix": "<path or empty string>", // MATCH rule only
		"dstPrefix": "<path or empty string>", // MATCH rule only
		"dstType": "materials" | "products",   // MATCH rule only
		"dstName": "<step name>",              // MATCH rule only
	}

If the rule does not match any of the available formats the first return value
is nil and the second return value is the error.
*/
func UnpackRule(rule []string) (map[string]string, error) {
	// Cache rule len
	ruleLen := len(rule)

	// Create an all-lower-case copy of the rule to case-insensitively parse
	// out tokens whose position we don't know yet. We keep the original rule
	// to retain the non-token elements' case.
	ruleLower := make([]string, ruleLen)
	for i, val := range rule {
		ruleLower[i] = strings.ToLower(val)
	}

	switch ruleLower[0] {
	case "create", "modify", "delete", "allow", "disallow", "require":
		if ruleLen != 2 {
			return nil,
				fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
		}

		return map[string]string{
			"type":    ruleLower[0],
			"pattern": rule[1],
		}, nil

	case "match":
		var srcPrefix string
		var dstType string
		var dstPrefix string
		var dstName string

		// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
		// IN <destination-path-prefix> FROM <step>
		if ruleLen == 10 && ruleLower[2] == "in" &&
			ruleLower[4] == "with" && ruleLower[6] == "in" &&
			ruleLower[8] == "from" {
			srcPrefix = rule[3]
			dstType = ruleLower[5]
			dstPrefix = rule[7]
			dstName = rule[9]
			// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
			// FROM <step>
		} else if ruleLen == 8 && ruleLower[2] == "in" &&
			ruleLower[4] == "with" && ruleLower[6] == "from" {
			srcPrefix = rule[3]
			dstType = ruleLower[5]
			dstPrefix = ""
			dstName = rule[7]

			// MATCH <pattern> WITH (MATERIALS|PRODUCTS) IN <destination-path-prefix>
			// FROM <step>
		} else if ruleLen == 8 && ruleLower[2] == "with" &&
			ruleLower[4] == "in" && ruleLower[6] == "from" {
			srcPrefix = ""
			dstType = ruleLower[3]
			dstPrefix = rule[5]
			dstName = rule[7]

			// MATCH <pattern> WITH (MATERIALS|PRODUCTS) FROM <step>
		} else if ruleLen == 6 && ruleLower[2] == "with" &&
			ruleLower[4] == "from" {
			srcPrefix = ""
			dstType = ruleLower[3]
			dstPrefix = ""
			dstName = rule[5]

		} else {
			return nil,
				fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)

		}

		return map[string]string{
			"type":      ruleLower[0],
			"pattern":   rule[1],
			"srcPrefix": srcPrefix,
			"dstPrefix": dstPrefix,
			"dstType":   dstType,
			"dstName":   dstName,
		}, nil

	default:
		return nil,
			fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
	}
}
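
A minimal sketch of feeding a MATCH rule (already tokenized into words) through UnpackRule; the step name "build" and the pattern are examples only:

package main

import (
	"fmt"
	"log"

	in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	// Six-token form: MATCH <pattern> WITH (MATERIALS|PRODUCTS) FROM <step>
	rule := []string{"MATCH", "foo.tar.gz", "WITH", "PRODUCTS", "FROM", "build"}

	unpacked, err := in_toto.UnpackRule(rule)
	if err != nil {
		log.Fatal(err)
	}
	// Prints: type=match pattern=foo.tar.gz dstType=products dstName=build
	// (both path prefixes are empty strings in this form).
	fmt.Printf("type=%s pattern=%s dstType=%s dstName=%s\n",
		unpacked["type"], unpacked["pattern"], unpacked["dstType"], unpacked["dstName"])
}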
409 vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go generated vendored Normal file
@@ -0,0 +1,409 @@
package in_toto

import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"reflect"
	"strings"
	"syscall"

	"github.com/shibumi/go-pathspec"
)

// ErrSymCycle signals a detected symlink cycle in our RecordArtifacts() function.
var ErrSymCycle = errors.New("symlink cycle detected")

// ErrUnsupportedHashAlgorithm signals a missing hash mapping in getHashMapping
var ErrUnsupportedHashAlgorithm = errors.New("unsupported hash algorithm detected")

var ErrEmptyCommandArgs = errors.New("the command args are empty")

// visitedSymlinks is a hashset that contains all paths that we have visited.
var visitedSymlinks Set

/*
RecordArtifact reads and hashes the contents of the file at the passed path
using sha256 and returns a map in the following format:

	{
		"<path>": {
			"sha256": <hex representation of hash>
		}
	}

If reading the file fails, the first return value is nil and the second return
value is the error.
NOTE: For cross-platform consistency Windows-style line separators (CRLF) are
normalized to Unix-style line separators (LF) before hashing file contents.
*/
func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) {
	supportedHashMappings := getHashMapping()
	// Read file from passed path
	contents, err := ioutil.ReadFile(path)
	hashedContentsMap := make(map[string]interface{})
	if err != nil {
		return nil, err
	}

	if lineNormalization {
		// "Normalize" file contents. We convert all line separators to '\n'
		// to keep the result operating-system independent.
		contents = bytes.ReplaceAll(contents, []byte("\r\n"), []byte("\n"))
		contents = bytes.ReplaceAll(contents, []byte("\r"), []byte("\n"))
	}

	// Create a map of all the hashes present in the hash_func list
	for _, element := range hashAlgorithms {
		if _, ok := supportedHashMappings[element]; !ok {
			return nil, fmt.Errorf("%w: %s", ErrUnsupportedHashAlgorithm, element)
		}
		h := supportedHashMappings[element]
		result := fmt.Sprintf("%x", hashToHex(h(), contents))
		hashedContentsMap[element] = result
	}

	// Return it in a format that is conformant with link metadata artifacts
	return hashedContentsMap, nil
}

/*
RecordArtifacts is a wrapper around recordArtifacts.
RecordArtifacts initializes a fresh set for storing visited symlinks,
calls recordArtifacts, and passes its result through.
recordArtifacts walks through the passed slice of paths, traversing
subdirectories, and calls RecordArtifact for each file. It returns a map in
the following format:

	{
		"<path>": {
			"sha256": <hex representation of hash>
		},
		"<path>": {
			"sha256": <hex representation of hash>
		},
		...
	}

If recording an artifact fails the first return value is nil and the second
return value is the error.
*/
func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (evalArtifacts map[string]interface{}, err error) {
	// Make sure to initialize a fresh hashset for every RecordArtifacts call
	visitedSymlinks = NewSet()
	evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
	// pass result and error through
	return evalArtifacts, err
}

/*
recordArtifacts walks through the passed slice of paths, traversing
subdirectories, and calls RecordArtifact for each file. It returns a map in
the following format:

	{
		"<path>": {
			"sha256": <hex representation of hash>
		},
		"<path>": {
			"sha256": <hex representation of hash>
		},
		...
	}

If recording an artifact fails the first return value is nil and the second
return value is the error.
*/
func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (map[string]interface{}, error) {
	artifacts := make(map[string]interface{})
	for _, path := range paths {
		err := filepath.Walk(path,
			func(path string, info os.FileInfo, err error) error {
				// Abort if the Walk function has a problem,
				// e.g. path does not exist
				if err != nil {
					return err
				}
				// We need to call pathspec.GitIgnore inside of our filepath.Walk, because otherwise
				// we will not catch all paths. Just imagine a path like "." and a pattern like "*.pub".
				// If we called pathspec outside of filepath.Walk, this would not match.
				ignore, err := pathspec.GitIgnore(gitignorePatterns, path)
				if err != nil {
					return err
				}
				if ignore {
					return nil
				}
				// Don't hash directories
				if info.IsDir() {
					return nil
				}

				// Check for a symlink and evaluate the last element in a symlink
				// chain via filepath.EvalSymlinks. We use EvalSymlinks here,
				// because with os.Readlink() we would just read the next
				// element in a possible symlink chain. This would mean more
				// iterations. info.Mode()&os.ModeSymlink uses the file
				// type bitmask to check for a symlink.
				if info.Mode()&os.ModeSymlink == os.ModeSymlink {
					// return with error if we detect a symlink cycle
					if ok := visitedSymlinks.Has(path); ok {
						// this error will get passed through
						// to RecordArtifacts()
						return ErrSymCycle
					}
					evalSym, err := filepath.EvalSymlinks(path)
					if err != nil {
						return err
					}
					// Add the symlink to the visitedSymlinks set;
					// this way, we know which links we have visited already.
					// If we visit a symlink twice, we have detected a symlink cycle.
					visitedSymlinks.Add(path)
					// We recursively call recordArtifacts() to follow
					// the new path.
					evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
					if evalErr != nil {
						return evalErr
					}
					for key, value := range evalArtifacts {
						artifacts[key] = value
					}
					return nil
				}
				artifact, err := RecordArtifact(path, hashAlgorithms, lineNormalization)
				// Abort if artifact can't be recorded, e.g.
				// due to file permissions
				if err != nil {
					return err
				}

				for _, strip := range lStripPaths {
					if strings.HasPrefix(path, strip) {
						path = strings.TrimPrefix(path, strip)
						break
					}
				}
				// Check if path is unique
				_, existingPath := artifacts[path]
				if existingPath {
					return fmt.Errorf("left stripping has resulted in non unique dictionary key: %s", path)
				}
				artifacts[path] = artifact
				return nil
			})

		if err != nil {
			return nil, err
		}
	}

	return artifacts, nil
}

/*
waitErrToExitCode converts an error returned by Cmd.Wait() to an exit code. It
returns -1 if no exit code can be inferred.
*/
func waitErrToExitCode(err error) int {
	// If there's no exit code, we return -1
	retVal := -1

	// See https://stackoverflow.com/questions/10385551/get-exit-code-go
	if err != nil {
		if exiterr, ok := err.(*exec.ExitError); ok {
			// The program has exited with an exit code != 0.
			// This works on both Unix and Windows. Although package
			// syscall is generally platform dependent, WaitStatus is
			// defined for both Unix and Windows and in both cases has
			// an ExitStatus() method with the same signature.
			if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
				retVal = status.ExitStatus()
			}
		}
	} else {
		retVal = 0
	}

	return retVal
}

/*
RunCommand executes the passed command in a subprocess. The first element of
cmdArgs is used as executable and the rest as command arguments. It captures
and returns stdout, stderr and exit code. The format of the returned map is:

	{
		"return-value": <exit code>,
		"stdout": "<standard output>",
		"stderr": "<standard error>"
	}

If the command cannot be executed or no pipes for stdout or stderr can be
created, the first return value is nil and the second return value is the error.
NOTE: Since stdout and stderr are captured, they cannot be seen during the
command execution.
*/
func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) {
	if len(cmdArgs) == 0 {
		return nil, ErrEmptyCommandArgs
	}

	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)

	if runDir != "" {
		cmd.Dir = runDir
	}

	stderrPipe, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	stdoutPipe, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}

	if err := cmd.Start(); err != nil {
		return nil, err
	}

	// TODO: duplicate stdout, stderr
	stdout, _ := ioutil.ReadAll(stdoutPipe)
	stderr, _ := ioutil.ReadAll(stderrPipe)

	retVal := waitErrToExitCode(cmd.Wait())

	return map[string]interface{}{
		"return-value": float64(retVal),
		"stdout":       string(stdout),
		"stderr":       string(stderr),
	}, nil
}

/*
InTotoRun executes commands, e.g. for software supply chain steps or
inspections of an in-toto layout, and creates and returns corresponding link
metadata. Link metadata contains recorded products at the passed productPaths
and materials at the passed materialPaths. The returned link is wrapped in a
Metablock object. If command execution or artifact recording fails the first
return value is an empty Metablock and the second return value is the error.
*/
func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string,
	cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string,
	lStripPaths []string, lineNormalization bool) (Metablock, error) {
	var linkMb Metablock

	materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
	if err != nil {
		return linkMb, err
	}

	// make sure that we only run RunCommand if cmdArgs is not nil or empty
	byProducts := map[string]interface{}{}
	if len(cmdArgs) != 0 {
		byProducts, err = RunCommand(cmdArgs, runDir)
		if err != nil {
			return linkMb, err
		}
	}

	products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
	if err != nil {
		return linkMb, err
	}

	linkMb.Signed = Link{
		Type:        "link",
		Name:        name,
		Materials:   materials,
		Products:    products,
		ByProducts:  byProducts,
		Command:     cmdArgs,
		Environment: map[string]interface{}{},
	}

	linkMb.Signatures = []Signature{}
	// We use a feature from Go 1.13 here to check the key struct:
	// IsZero() returns true if the key hasn't been initialized
	// with values other than the default ones.
	if !reflect.ValueOf(key).IsZero() {
		if err := linkMb.Sign(key); err != nil {
			return linkMb, err
		}
	}

	return linkMb, nil
}

/*
InTotoRecordStart begins the creation of a link metablock file in two steps,
in order to provide evidence for supply chain steps that cannot be carried out
by a single command. InTotoRecordStart collects the hashes of the materials
before any commands are run, signs the unfinished link, and returns the link.
*/
func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) {
	var linkMb Metablock
	materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
	if err != nil {
		return linkMb, err
	}

	linkMb.Signed = Link{
		Type:        "link",
		Name:        name,
		Materials:   materials,
		Products:    map[string]interface{}{},
		ByProducts:  map[string]interface{}{},
		Command:     []string{},
		Environment: map[string]interface{}{},
	}

	if !reflect.ValueOf(key).IsZero() {
		if err := linkMb.Sign(key); err != nil {
			return linkMb, err
		}
	}

	return linkMb, nil
}

/*
InTotoRecordStop ends the creation of a metadata link file created by
InTotoRecordStart. InTotoRecordStop takes in a signed unfinished link metablock
created by InTotoRecordStart and records the hashes of any products created by
commands run between InTotoRecordStart and InTotoRecordStop. The resulting
finished link metablock is then signed by the provided key and returned.
*/
func InTotoRecordStop(prelimLinkMb Metablock, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) {
	var linkMb Metablock
	if err := prelimLinkMb.VerifySignature(key); err != nil {
		return linkMb, err
	}

	link, ok := prelimLinkMb.Signed.(Link)
	if !ok {
		return linkMb, errors.New("invalid metadata block")
	}

	products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
	if err != nil {
		return linkMb, err
	}

	link.Products = products
	linkMb.Signed = link

	if !reflect.ValueOf(key).IsZero() {
		if err := linkMb.Sign(key); err != nil {
			return linkMb, err
		}
	}

	return linkMb, nil
}
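
A minimal sketch of recording a supply chain step with InTotoRun. All paths and the step name are illustrative; passing the zero-value Key makes InTotoRun skip signing (see the reflect.ValueOf(key).IsZero() check above), so the returned link carries no signatures:

package main

import (
	"log"

	in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	linkMb, err := in_toto.InTotoRun(
		"build",            // step name
		".",                // directory to run the command in
		[]string{"src"},    // materials, hashed before the command runs
		[]string{"dist"},   // products, hashed after the command runs
		[]string{"make"},   // command; stdout/stderr land in ByProducts
		in_toto.Key{},      // zero key: unsigned link
		[]string{"sha256"}, // hash algorithms
		nil,                // gitignore-style exclude patterns
		nil,                // prefixes to left-strip from recorded paths
		false,              // no CRLF -> LF normalization
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("recorded %d materials", len(linkMb.Signed.(in_toto.Link).Materials))
}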
16 vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common/common.go generated vendored Normal file
@@ -0,0 +1,16 @@
package common

// DigestSet contains a set of digests. It is represented as a map from
// algorithm name to lowercase hex-encoded value.
type DigestSet map[string]string

// ProvenanceBuilder identifies the entity that executed the build steps.
type ProvenanceBuilder struct {
	ID string `json:"id"`
}

// ProvenanceMaterial defines the materials used to build an artifact.
type ProvenanceMaterial struct {
	URI    string    `json:"uri,omitempty"`
	Digest DigestSet `json:"digest,omitempty"`
}
50 vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1/provenance.go generated vendored Normal file
@@ -0,0 +1,50 @@
package v01

import (
	"time"

	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
)

const (
	// PredicateSLSAProvenance represents a build provenance for an artifact.
	PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.1"
)

// ProvenancePredicate is the provenance predicate definition.
type ProvenancePredicate struct {
	Builder   common.ProvenanceBuilder   `json:"builder"`
	Recipe    ProvenanceRecipe           `json:"recipe"`
	Metadata  *ProvenanceMetadata        `json:"metadata,omitempty"`
	Materials []common.ProvenanceMaterial `json:"materials,omitempty"`
}

// ProvenanceRecipe describes the actions performed by the builder.
type ProvenanceRecipe struct {
	Type string `json:"type"`
	// DefinedInMaterial can be sent as the null pointer to indicate that
	// the value is not present.
	DefinedInMaterial *int        `json:"definedInMaterial,omitempty"`
	EntryPoint        string      `json:"entryPoint"`
	Arguments         interface{} `json:"arguments,omitempty"`
	Environment       interface{} `json:"environment,omitempty"`
}

// ProvenanceMetadata contains metadata for the built artifact.
type ProvenanceMetadata struct {
	// Use pointers to make sure that the absence of a time is not
	// encoded as the Epoch time.
	BuildStartedOn  *time.Time         `json:"buildStartedOn,omitempty"`
	BuildFinishedOn *time.Time         `json:"buildFinishedOn,omitempty"`
	Completeness    ProvenanceComplete `json:"completeness"`
	Reproducible    bool               `json:"reproducible"`
}

// ProvenanceComplete indicates whether the claims in build/recipe are complete.
// For in-depth information refer to the specification:
// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
type ProvenanceComplete struct {
	Arguments   bool `json:"arguments"`
	Environment bool `json:"environment"`
	Materials   bool `json:"materials"`
}
137 vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go generated vendored Normal file
@@ -0,0 +1,137 @@
package v02

import (
	"time"

	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
)

const (
	// PredicateSLSAProvenance represents a build provenance for an artifact.
	PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.2"
)

// ProvenancePredicate is the provenance predicate definition.
type ProvenancePredicate struct {
	// Builder identifies the entity that executed the invocation, which is trusted to have
	// correctly performed the operation and populated this provenance.
	//
	// The identity MUST reflect the trust base that consumers care about. How detailed to be is a
	// judgement call. For example, GitHub Actions supports both GitHub-hosted runners and
	// self-hosted runners. The GitHub-hosted runner might be a single identity because it’s all
	// GitHub from the consumer’s perspective. Meanwhile, each self-hosted runner might have its
	// own identity because not all runners are trusted by all consumers.
	Builder common.ProvenanceBuilder `json:"builder"`

	// BuildType is a URI indicating what type of build was performed. It determines the meaning of
	// [Invocation], [BuildConfig] and [Materials].
	BuildType string `json:"buildType"`

	// Invocation identifies the event that kicked off the build. When combined with materials,
	// this SHOULD fully describe the build, such that re-running this invocation results in
	// bit-for-bit identical output (if the build is reproducible).
	//
	// MAY be unset/null if unknown, but this is DISCOURAGED.
	Invocation ProvenanceInvocation `json:"invocation,omitempty"`

	// BuildConfig lists the steps in the build. If [ProvenanceInvocation.ConfigSource] is not
	// available, BuildConfig can be used to verify information about the build.
	//
	// This is an arbitrary JSON object with a schema defined by [BuildType].
	BuildConfig interface{} `json:"buildConfig,omitempty"`

	// Metadata contains other properties of the build.
	Metadata *ProvenanceMetadata `json:"metadata,omitempty"`

	// Materials is the collection of artifacts that influenced the build including sources,
	// dependencies, build tools, base images, and so on.
	//
	// This is considered to be incomplete unless metadata.completeness.materials is true.
	Materials []common.ProvenanceMaterial `json:"materials,omitempty"`
}

// ProvenanceInvocation identifies the event that kicked off the build.
type ProvenanceInvocation struct {
	// ConfigSource describes where the config file that kicked off the build came from. This is
	// effectively a pointer to the source where [ProvenancePredicate.BuildConfig] came from.
	ConfigSource ConfigSource `json:"configSource,omitempty"`

	// Parameters is a collection of all external inputs that influenced the build on top of
	// ConfigSource. For example, if the invocation type were “make”, then this might be the
	// flags passed to make aside from the target, which is captured in [ConfigSource.EntryPoint].
	//
	// Consumers SHOULD accept only “safe” Parameters. The simplest and safest way to
	// achieve this is to disallow any parameters altogether.
	//
	// This is an arbitrary JSON object with a schema defined by buildType.
	Parameters interface{} `json:"parameters,omitempty"`

	// Environment contains any other builder-controlled inputs necessary for correctly evaluating
	// the build. Usually only needed for reproducing the build but not evaluated as part of
	// policy.
	//
	// This SHOULD be minimized to only include things that are part of the public API, that cannot
	// be recomputed from other values in the provenance, and that actually affect the evaluation
	// of the build. For example, this might include variables that are referenced in the workflow
	// definition, but it SHOULD NOT include a dump of all environment variables or include things
	// like the hostname (assuming hostname is not part of the public API).
	Environment interface{} `json:"environment,omitempty"`
}

type ConfigSource struct {
	// URI indicating the identity of the source of the config.
	URI string `json:"uri,omitempty"`
	// Digest is a collection of cryptographic digests for the contents of the artifact specified
	// by [URI].
	Digest common.DigestSet `json:"digest,omitempty"`
	// EntryPoint identifies the entry point into the build. This is often a path to a
	// configuration file and/or a target label within that file. The syntax and meaning are
	// defined by buildType. For example, if the buildType were “make”, then this would reference
	// the directory in which to run make as well as which target to use.
	//
	// Consumers SHOULD accept only specific [ProvenanceInvocation.EntryPoint] values. For example,
	// a policy might only allow the "release" entry point but not the "debug" entry point.
	// MAY be omitted if the buildType specifies a default value.
	EntryPoint string `json:"entryPoint,omitempty"`
}

// ProvenanceMetadata contains metadata for the built artifact.
type ProvenanceMetadata struct {
	// BuildInvocationID identifies this particular build invocation, which can be useful for
	// finding associated logs or other ad-hoc analysis. The exact meaning and format is defined
	// by [common.ProvenanceBuilder.ID]; by default it is treated as opaque and case-sensitive.
	// The value SHOULD be globally unique.
	BuildInvocationID string `json:"buildInvocationID,omitempty"`

	// BuildStartedOn is the timestamp of when the build started.
	//
	// Use pointers to make sure that the absence of a time is not
	// encoded as the Epoch time.
	BuildStartedOn *time.Time `json:"buildStartedOn,omitempty"`
	// BuildFinishedOn is the timestamp of when the build completed.
	BuildFinishedOn *time.Time `json:"buildFinishedOn,omitempty"`

	// Completeness indicates that the builder claims certain fields in this message to be
	// complete.
	Completeness ProvenanceComplete `json:"completeness"`

	// Reproducible if true, means the builder claims that running invocation on materials will
	// produce bit-for-bit identical output.
	Reproducible bool `json:"reproducible"`
}

// ProvenanceComplete indicates whether the claims in build/recipe are complete.
// For in-depth information refer to the specification:
// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
type ProvenanceComplete struct {
	// Parameters if true, means the builder claims that [ProvenanceInvocation.Parameters] is
	// complete, meaning that all external inputs are properly captured in
	// ProvenanceInvocation.Parameters.
	Parameters bool `json:"parameters"`
	// Environment if true, means the builder claims that [ProvenanceInvocation.Environment] is
	// complete.
	Environment bool `json:"environment"`
	// Materials if true, means the builder claims that materials is complete, usually through some
	// controls to prevent network access. Sometimes called “hermetic”.
	Materials bool `json:"materials"`
}
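
A minimal sketch of constructing a SLSA v0.2 provenance predicate from the types above and serializing it to JSON; the builder ID, build type URI, and material are example values only:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
	v02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
)

func main() {
	pred := v02.ProvenancePredicate{
		Builder:   common.ProvenanceBuilder{ID: "https://example.com/builders/ci"},
		BuildType: "https://example.com/buildtypes/make@v1",
		Materials: []common.ProvenanceMaterial{{
			URI:    "git+https://example.com/org/repo",
			Digest: common.DigestSet{"sha1": "0123456789abcdef0123456789abcdef01234567"},
		}},
	}
	out, err := json.MarshalIndent(pred, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}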
147 vendor/github.com/in-toto/in-toto-golang/in_toto/util.go generated vendored Normal file
@@ -0,0 +1,147 @@
package in_toto

import (
	"fmt"
)

/*
Set represents a data structure for set operations. See `NewSet` for how to
create a Set, and available Set receivers for useful set operations.

Under the hood Set aliases map[string]struct{}, where the map keys are the set
elements and the map values are a memory-efficient way of storing the keys.
*/
type Set map[string]struct{}

/*
NewSet creates a new Set, assigns it the optionally passed variadic string
elements, and returns it.
*/
func NewSet(elems ...string) Set {
	var s Set = make(map[string]struct{})
	for _, elem := range elems {
		s.Add(elem)
	}
	return s
}

/*
Has returns true if the passed string is a member of the set on which it was
called, and false otherwise.
*/
func (s Set) Has(elem string) bool {
	_, ok := s[elem]
	return ok
}

/*
Add adds the passed string to the set on which it was called, if the string is
not a member of the set.
*/
func (s Set) Add(elem string) {
	s[elem] = struct{}{}
}

/*
Remove removes the passed string from the set on which it was called, if the
string is a member of the set.
*/
func (s Set) Remove(elem string) {
	delete(s, elem)
}

/*
Intersection creates and returns a new Set with the elements of the set on
which it was called that are also in the passed set.
*/
func (s Set) Intersection(s2 Set) Set {
	res := NewSet()
	for elem := range s {
		if !s2.Has(elem) {
			continue
		}
		res.Add(elem)
	}
	return res
}

/*
Difference creates and returns a new Set with the elements of the set on
which it was called that are not in the passed set.
*/
func (s Set) Difference(s2 Set) Set {
	res := NewSet()
	for elem := range s {
		if s2.Has(elem) {
			continue
		}
		res.Add(elem)
	}
	return res
}

/*
Filter creates and returns a new Set with the elements of the set on which it
was called that match the passed pattern. A matching error is treated like a
non-match, and a warning is printed.
*/
func (s Set) Filter(pattern string) Set {
	res := NewSet()
	for elem := range s {
		matched, err := match(pattern, elem)
		if err != nil {
			fmt.Printf("WARNING: %s, pattern was '%s'\n", err, pattern)
			continue
		}
		if !matched {
			continue
		}
		res.Add(elem)
	}
	return res
}

/*
Slice creates and returns an unordered string slice with the elements of the
set on which it was called.
*/
func (s Set) Slice() []string {
	res := make([]string, 0, len(s))
	for elem := range s {
		res = append(res, elem)
	}
	return res
}

/*
InterfaceKeyStrings returns the string keys of the passed interface{} map in
an unordered string slice.
*/
func InterfaceKeyStrings(m map[string]interface{}) []string {
	res := make([]string, len(m))
	i := 0
	for k := range m {
		res[i] = k
		i++
	}
	return res
}

/*
IsSubSet checks if the parameter subset is a
subset of the superset s.
*/
func (s Set) IsSubSet(subset Set) bool {
	if len(subset) > len(s) {
		return false
	}
	for key := range subset {
		if !s.Has(key) {
			return false
		}
	}
	return true
}
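
A quick sketch of the Set helpers above, tying them to the modified matcher from match.go; the element names are arbitrary:

package main

import (
	"fmt"

	in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
	s := in_toto.NewSet("foo.py", "bar.tar.gz", "baz.py")

	matched := s.Filter("*.py")        // keeps foo.py and baz.py
	fmt.Println(matched.Has("foo.py")) // true
	fmt.Println(s.IsSubSet(matched))   // true: the filtered set is a subset of s
	fmt.Println(len(matched.Slice()))  // 2
}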
14 vendor/github.com/in-toto/in-toto-golang/in_toto/util_unix.go generated vendored Normal file
@@ -0,0 +1,14 @@
//go:build linux || darwin || !windows
// +build linux darwin !windows

package in_toto

import "golang.org/x/sys/unix"

func isWritable(path string) error {
	err := unix.Access(path, unix.W_OK)
	if err != nil {
		return err
	}
	return nil
}
25 vendor/github.com/in-toto/in-toto-golang/in_toto/util_windows.go generated vendored Normal file
@@ -0,0 +1,25 @@
package in_toto

import (
	"errors"
	"os"
)

func isWritable(path string) error {
	// get fileInfo
	info, err := os.Stat(path)
	if err != nil {
		return err
	}

	// check if path is a directory
	if !info.IsDir() {
		return errors.New("not a directory")
	}

	// Check if the owner-write bit is enabled in the file permissions
	if info.Mode().Perm()&(1<<(uint(7))) == 0 {
		return errors.New("not writable")
	}
	return nil
}
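
One note on the bitmask above: Unix-style permission bits count from bit 0 (others-execute) up to bit 8 (owner-read), so 1<<7 is the owner-write bit, i.e. 0o200. A tiny self-contained check, for illustration only:

package main

import (
	"fmt"
	"os"
)

func main() {
	const ownerWrite = 1 << 7 // 0o200, the bit isWritable tests

	fmt.Println(ownerWrite == 0o200)                       // true
	fmt.Println(os.FileMode(0o644).Perm()&ownerWrite != 0) // true: rw-r--r-- is owner-writable
	fmt.Println(os.FileMode(0o444).Perm()&ownerWrite != 0) // false: r--r--r-- is not
}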
1091 vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go generated vendored Normal file
File diff suppressed because it is too large
4130 vendor/github.com/moby/buildkit/api/services/control/control.pb.go generated vendored
File diff suppressed because it is too large
80 vendor/github.com/moby/buildkit/api/services/control/control.proto generated vendored
@@ -6,6 +6,9 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
import "github.com/moby/buildkit/api/types/worker.proto";
// import "github.com/containerd/containerd/api/types/descriptor.proto";
import "github.com/gogo/googleapis/google/rpc/status.proto";
import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto";

option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;

@@ -18,7 +21,10 @@ service Control {
	rpc Status(StatusRequest) returns (stream StatusResponse);
	rpc Session(stream BytesMessage) returns (stream BytesMessage);
	rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse);
	// rpc Info(InfoRequest) returns (InfoResponse);
	rpc Info(InfoRequest) returns (InfoResponse);

	rpc ListenBuildHistory(BuildHistoryRequest) returns (stream BuildHistoryEvent);
	rpc UpdateBuildHistory(UpdateBuildHistoryRequest) returns (UpdateBuildHistoryResponse);
}

message PruneRequest {

@@ -62,6 +68,8 @@ message SolveRequest {
	CacheOptions Cache = 8 [(gogoproto.nullable) = false];
	repeated string Entitlements = 9 [(gogoproto.customtype) = "github.com/moby/buildkit/util/entitlements.Entitlement" ];
	map<string, pb.Definition> FrontendInputs = 10;
	bool Internal = 11; // Internal builds are not recorded in build history
	moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 12;
}

message CacheOptions {

@@ -157,3 +165,73 @@ message ListWorkersRequest {
message ListWorkersResponse {
	repeated moby.buildkit.v1.types.WorkerRecord record = 1;
}

message InfoRequest {}

message InfoResponse {
	moby.buildkit.v1.types.BuildkitVersion buildkitVersion = 1;
}

message BuildHistoryRequest {
	bool ActiveOnly = 1;
	string Ref = 2;
	bool EarlyExit = 3;
}

enum BuildHistoryEventType {
	STARTED = 0;
	COMPLETE = 1;
	DELETED = 2;
}

message BuildHistoryEvent {
	BuildHistoryEventType type = 1;
	BuildHistoryRecord record = 2;
}

message BuildHistoryRecord {
	string Ref = 1;
	string Frontend = 2;
	map<string, string> FrontendAttrs = 3;
	repeated Exporter Exporters = 4;
	google.rpc.Status error = 5;
	google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true];
	google.protobuf.Timestamp CompletedAt = 7 [(gogoproto.stdtime) = true];
	Descriptor logs = 8;
	map<string, string> ExporterResponse = 9;
	BuildResultInfo Result = 10;
	map<string, BuildResultInfo> Results = 11;
	int32 Generation = 12;
	Descriptor trace = 13;
	bool pinned = 14;
	int32 numCachedSteps = 15;
	int32 numTotalSteps = 16;
	int32 numCompletedSteps = 17;
	// TODO: tags
	// TODO: unclipped logs
}

message UpdateBuildHistoryRequest {
	string Ref = 1;
	bool Pinned = 2;
	bool Delete = 3;
}

message UpdateBuildHistoryResponse {}

message Descriptor {
	string media_type = 1;
	string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
	int64 size = 3;
	map<string, string> annotations = 5;
}

message BuildResultInfo {
	Descriptor Result = 1;
	repeated Descriptor Attestations = 2;
}

message Exporter {
	string Type = 1;
	map<string, string> Attrs = 2;
}
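A hedged sketch of consuming the new build-history API this proto adds, using the generated Go bindings; the gRPC method and message names follow from the proto above, but the dial target, import alias, and minimal error handling are assumptions, not the canonical client wiring:

package main

import (
	"context"
	"log"

	controlapi "github.com/moby/buildkit/api/services/control"
	"google.golang.org/grpc"
)

func main() {
	// Dial target is an example buildkitd control socket.
	conn, err := grpc.Dial("unix:///run/buildkit/buildkitd.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := controlapi.NewControlClient(conn)
	stream, err := client.ListenBuildHistory(context.Background(),
		&controlapi.BuildHistoryRequest{ActiveOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	for {
		ev, err := stream.Recv() // blocks until the next STARTED/COMPLETE/DELETED event
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("%s %s", ev.Type, ev.Record.Ref)
	}
}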
394 vendor/github.com/moby/buildkit/api/types/worker.pb.go generated vendored
@@ -29,6 +29,7 @@ type WorkerRecord struct {
    Labels map[string]string `protobuf:"bytes,2,rep,name=Labels,proto3" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms"`
    GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy,proto3" json:"GCPolicy,omitempty"`
    BuildkitVersion *BuildkitVersion `protobuf:"bytes,5,opt,name=BuildkitVersion,proto3" json:"BuildkitVersion,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
@@ -95,6 +96,13 @@ func (m *WorkerRecord) GetGCPolicy() []*GCPolicy {
    return nil
}

func (m *WorkerRecord) GetBuildkitVersion() *BuildkitVersion {
    if m != nil {
        return m.BuildkitVersion
    }
    return nil
}

type GCPolicy struct {
    All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"`
    KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"`
@@ -166,39 +174,106 @@ func (m *GCPolicy) GetFilters() []string {
    return nil
}

type BuildkitVersion struct {
    Package string `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"`
    Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
    Revision string `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized []byte `json:"-"`
    XXX_sizecache int32 `json:"-"`
}

func (m *BuildkitVersion) Reset() { *m = BuildkitVersion{} }
func (m *BuildkitVersion) String() string { return proto.CompactTextString(m) }
func (*BuildkitVersion) ProtoMessage() {}
func (*BuildkitVersion) Descriptor() ([]byte, []int) {
    return fileDescriptor_e4ff6184b07e587a, []int{2}
}
func (m *BuildkitVersion) XXX_Unmarshal(b []byte) error {
    return m.Unmarshal(b)
}
func (m *BuildkitVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
    if deterministic {
        return xxx_messageInfo_BuildkitVersion.Marshal(b, m, deterministic)
    } else {
        b = b[:cap(b)]
        n, err := m.MarshalToSizedBuffer(b)
        if err != nil {
            return nil, err
        }
        return b[:n], nil
    }
}
func (m *BuildkitVersion) XXX_Merge(src proto.Message) {
    xxx_messageInfo_BuildkitVersion.Merge(m, src)
}
func (m *BuildkitVersion) XXX_Size() int {
    return m.Size()
}
func (m *BuildkitVersion) XXX_DiscardUnknown() {
    xxx_messageInfo_BuildkitVersion.DiscardUnknown(m)
}

var xxx_messageInfo_BuildkitVersion proto.InternalMessageInfo

func (m *BuildkitVersion) GetPackage() string {
    if m != nil {
        return m.Package
    }
    return ""
}

func (m *BuildkitVersion) GetVersion() string {
    if m != nil {
        return m.Version
    }
    return ""
}

func (m *BuildkitVersion) GetRevision() string {
    if m != nil {
        return m.Revision
    }
    return ""
}

func init() {
    proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord")
    proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.types.WorkerRecord.LabelsEntry")
    proto.RegisterType((*GCPolicy)(nil), "moby.buildkit.v1.types.GCPolicy")
    proto.RegisterType((*BuildkitVersion)(nil), "moby.buildkit.v1.types.BuildkitVersion")
}

func init() { proto.RegisterFile("worker.proto", fileDescriptor_e4ff6184b07e587a) }

var fileDescriptor_e4ff6184b07e587a = []byte{
    // 355 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xea, 0x40,
    0x14, 0x86, 0x6f, 0x5b, 0x2e, 0x97, 0x0e, 0xcd, 0x8d, 0x99, 0x18, 0xd3, 0x10, 0x83, 0x84, 0x15,
    0x0b, 0x9d, 0xa2, 0x6e, 0xd4, 0xb8, 0x42, 0x8c, 0x92, 0xb8, 0x20, 0xb3, 0x71, 0xdd, 0x81, 0x01,
    0x9b, 0x0e, 0x9c, 0xc9, 0x74, 0x8a, 0xf6, 0x39, 0x7c, 0x29, 0x96, 0x3e, 0x81, 0x31, 0x3c, 0x89,
    0x99, 0x29, 0x08, 0x26, 0xba, 0x3b, 0xff, 0x9f, 0xff, 0xfb, 0xe7, 0x9c, 0x0c, 0x0a, 0x9e, 0x41,
    0xa5, 0x5c, 0x11, 0xa9, 0x40, 0x03, 0x3e, 0x98, 0x01, 0x2b, 0x08, 0xcb, 0x13, 0x31, 0x4e, 0x13,
    0x4d, 0x16, 0xa7, 0x44, 0x17, 0x92, 0x67, 0x8d, 0x93, 0x69, 0xa2, 0x9f, 0x72, 0x46, 0x46, 0x30,
    0x8b, 0xa6, 0x30, 0x85, 0xc8, 0xc6, 0x59, 0x3e, 0xb1, 0xca, 0x0a, 0x3b, 0x95, 0x35, 0x8d, 0xe3,
    0x9d, 0xb8, 0x69, 0x8c, 0x36, 0x8d, 0x51, 0x06, 0x62, 0xc1, 0x55, 0x24, 0x59, 0x04, 0x32, 0x2b,
    0xd3, 0xed, 0x57, 0x17, 0x05, 0x8f, 0x76, 0x0b, 0xca, 0x47, 0xa0, 0xc6, 0xf8, 0x3f, 0x72, 0x07,
    0xfd, 0xd0, 0x69, 0x39, 0x1d, 0x9f, 0xba, 0x83, 0x3e, 0xbe, 0x47, 0xd5, 0x87, 0x98, 0x71, 0x91,
    0x85, 0x6e, 0xcb, 0xeb, 0xd4, 0xcf, 0xba, 0xe4, 0xe7, 0x35, 0xc9, 0x6e, 0x0b, 0x29, 0x91, 0xdb,
    0xb9, 0x56, 0x05, 0x5d, 0xf3, 0xb8, 0x8b, 0x7c, 0x29, 0x62, 0x3d, 0x01, 0x35, 0xcb, 0x42, 0xcf,
    0x96, 0x05, 0x44, 0x32, 0x32, 0x5c, 0x9b, 0xbd, 0xca, 0xf2, 0xfd, 0xe8, 0x0f, 0xdd, 0x86, 0xf0,
    0x35, 0xaa, 0xdd, 0xdd, 0x0c, 0x41, 0x24, 0xa3, 0x22, 0xac, 0x58, 0xa0, 0xf5, 0xdb, 0xeb, 0x9b,
    0x1c, 0xfd, 0x22, 0x1a, 0x97, 0xa8, 0xbe, 0xb3, 0x06, 0xde, 0x43, 0x5e, 0xca, 0x8b, 0xf5, 0x65,
    0x66, 0xc4, 0xfb, 0xe8, 0xef, 0x22, 0x16, 0x39, 0x0f, 0x5d, 0xeb, 0x95, 0xe2, 0xca, 0xbd, 0x70,
    0xda, 0x2f, 0xdb, 0x87, 0x0d, 0x17, 0x0b, 0x61, 0xb9, 0x1a, 0x35, 0x23, 0x6e, 0xa3, 0x20, 0xe5,
    0x5c, 0xf6, 0x73, 0x15, 0xeb, 0x04, 0xe6, 0x16, 0xf7, 0xe8, 0x37, 0x0f, 0x1f, 0x22, 0xdf, 0xe8,
    0x5e, 0xa1, 0xb9, 0x39, 0xd6, 0x04, 0xb6, 0x06, 0x0e, 0xd1, 0xbf, 0x49, 0x22, 0x34, 0x57, 0x99,
    0xbd, 0xcb, 0xa7, 0x1b, 0xd9, 0x0b, 0x96, 0xab, 0xa6, 0xf3, 0xb6, 0x6a, 0x3a, 0x1f, 0xab, 0xa6,
    0xc3, 0xaa, 0xf6, 0x93, 0xce, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x79, 0x52, 0x6a, 0x29,
    0x02, 0x00, 0x00,
    // 416 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xc1, 0x8e, 0xd3, 0x30,
    0x10, 0x25, 0xc9, 0xee, 0xd2, 0xb8, 0x11, 0x20, 0x0b, 0xa1, 0x28, 0x42, 0x25, 0xca, 0x85, 0x1e,
    0xc0, 0x59, 0x96, 0x0b, 0x20, 0x4e, 0xa1, 0x08, 0x56, 0xe2, 0xb0, 0xf8, 0x00, 0x67, 0x3b, 0xeb,
    0x86, 0x28, 0xee, 0xda, 0x72, 0x9c, 0x40, 0xfe, 0xb0, 0x47, 0xbe, 0x00, 0xa1, 0x1e, 0xf8, 0x0e,
    0x64, 0x27, 0x69, 0x4b, 0xd9, 0xde, 0xe6, 0xcd, 0xbc, 0xf7, 0x3c, 0xf3, 0x64, 0x10, 0x7c, 0x17,
    0xaa, 0x62, 0x0a, 0x49, 0x25, 0xb4, 0x80, 0x8f, 0x56, 0x82, 0x76, 0x88, 0x36, 0x25, 0xbf, 0xae,
    0x4a, 0x8d, 0xda, 0x17, 0x48, 0x77, 0x92, 0xd5, 0xd1, 0xf3, 0xa2, 0xd4, 0xdf, 0x1a, 0x8a, 0x72,
    0xb1, 0x4a, 0x0b, 0x51, 0x88, 0xd4, 0xd2, 0x69, 0xb3, 0xb4, 0xc8, 0x02, 0x5b, 0xf5, 0x36, 0xd1,
    0xb3, 0x3d, 0xba, 0x71, 0x4c, 0x47, 0xc7, 0xb4, 0x16, 0xbc, 0x65, 0x2a, 0x95, 0x34, 0x15, 0xb2,
    0xee, 0xd9, 0xc9, 0x1f, 0x17, 0x04, 0x5f, 0xed, 0x16, 0x98, 0xe5, 0x42, 0x5d, 0xc3, 0x7b, 0xc0,
    0xbd, 0x5c, 0x84, 0x4e, 0xec, 0xcc, 0x7d, 0xec, 0x5e, 0x2e, 0xe0, 0x47, 0x70, 0xf6, 0x89, 0x50,
    0xc6, 0xeb, 0xd0, 0x8d, 0xbd, 0xf9, 0xf4, 0xe2, 0x1c, 0xdd, 0xbe, 0x26, 0xda, 0x77, 0x41, 0xbd,
    0xe4, 0xfd, 0x8d, 0x56, 0x1d, 0x1e, 0xf4, 0xf0, 0x1c, 0xf8, 0x92, 0x13, 0xbd, 0x14, 0x6a, 0x55,
    0x87, 0x9e, 0x35, 0x0b, 0x90, 0xa4, 0xe8, 0x6a, 0x68, 0x66, 0x27, 0xeb, 0x5f, 0x4f, 0xee, 0xe0,
    0x1d, 0x09, 0xbe, 0x05, 0x93, 0x0f, 0xef, 0xae, 0x04, 0x2f, 0xf3, 0x2e, 0x3c, 0xb1, 0x82, 0xf8,
    0xd8, 0xeb, 0x23, 0x0f, 0x6f, 0x15, 0xf0, 0x33, 0xb8, 0x9f, 0x0d, 0xbc, 0x2f, 0x4c, 0xd5, 0xa5,
    0xb8, 0x09, 0x4f, 0x63, 0x67, 0x3e, 0xbd, 0x78, 0x7a, 0xcc, 0xe4, 0x80, 0x8e, 0x0f, 0xf5, 0xd1,
    0x6b, 0x30, 0xdd, 0xbb, 0x0c, 0x3e, 0x00, 0x5e, 0xc5, 0xba, 0x21, 0x2c, 0x53, 0xc2, 0x87, 0xe0,
    0xb4, 0x25, 0xbc, 0x61, 0xa1, 0x6b, 0x7b, 0x3d, 0x78, 0xe3, 0xbe, 0x72, 0x92, 0x1f, 0xbb, 0x5b,
    0x8c, 0x8e, 0x70, 0x6e, 0x75, 0x13, 0x6c, 0x4a, 0x98, 0x80, 0xa0, 0x62, 0x4c, 0x2e, 0x1a, 0x45,
    0xb4, 0x59, 0xd4, 0xc8, 0x3d, 0xfc, 0x4f, 0x0f, 0x3e, 0x06, 0xbe, 0xc1, 0x59, 0xa7, 0x99, 0xc9,
    0xcf, 0x10, 0x76, 0x0d, 0x18, 0x82, 0xbb, 0xcb, 0x92, 0x6b, 0xa6, 0x6a, 0x1b, 0x95, 0x8f, 0x47,
    0x98, 0x90, 0xff, 0x72, 0x30, 0x64, 0x49, 0xf2, 0x8a, 0x14, 0x6c, 0x58, 0x7e, 0x84, 0x66, 0xd2,
    0x0e, 0x61, 0xf5, 0x27, 0x8c, 0x10, 0x46, 0x60, 0xa2, 0x58, 0x5b, 0xda, 0x91, 0x67, 0x47, 0x5b,
    0x9c, 0x05, 0xeb, 0xcd, 0xcc, 0xf9, 0xb9, 0x99, 0x39, 0xbf, 0x37, 0x33, 0x87, 0x9e, 0xd9, 0xaf,
    0xf5, 0xf2, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x19, 0xcf, 0xd5, 0xdf, 0x02, 0x00, 0x00,
}

func (m *WorkerRecord) Marshal() (dAtA []byte, err error) {
@@ -225,6 +300,18 @@ func (m *WorkerRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
        i -= len(m.XXX_unrecognized)
        copy(dAtA[i:], m.XXX_unrecognized)
    }
    if m.BuildkitVersion != nil {
        {
            size, err := m.BuildkitVersion.MarshalToSizedBuffer(dAtA[:i])
            if err != nil {
                return 0, err
            }
            i -= size
            i = encodeVarintWorker(dAtA, i, uint64(size))
        }
        i--
        dAtA[i] = 0x2a
    }
    if len(m.GCPolicy) > 0 {
        for iNdEx := len(m.GCPolicy) - 1; iNdEx >= 0; iNdEx-- {
            {
@@ -338,6 +425,54 @@ func (m *GCPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    return len(dAtA) - i, nil
}

func (m *BuildkitVersion) Marshal() (dAtA []byte, err error) {
    size := m.Size()
    dAtA = make([]byte, size)
    n, err := m.MarshalToSizedBuffer(dAtA[:size])
    if err != nil {
        return nil, err
    }
    return dAtA[:n], nil
}

func (m *BuildkitVersion) MarshalTo(dAtA []byte) (int, error) {
    size := m.Size()
    return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *BuildkitVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    i := len(dAtA)
    _ = i
    var l int
    _ = l
    if m.XXX_unrecognized != nil {
        i -= len(m.XXX_unrecognized)
        copy(dAtA[i:], m.XXX_unrecognized)
    }
    if len(m.Revision) > 0 {
        i -= len(m.Revision)
        copy(dAtA[i:], m.Revision)
        i = encodeVarintWorker(dAtA, i, uint64(len(m.Revision)))
        i--
        dAtA[i] = 0x1a
    }
    if len(m.Version) > 0 {
        i -= len(m.Version)
        copy(dAtA[i:], m.Version)
        i = encodeVarintWorker(dAtA, i, uint64(len(m.Version)))
        i--
        dAtA[i] = 0x12
    }
    if len(m.Package) > 0 {
        i -= len(m.Package)
        copy(dAtA[i:], m.Package)
        i = encodeVarintWorker(dAtA, i, uint64(len(m.Package)))
        i--
        dAtA[i] = 0xa
    }
    return len(dAtA) - i, nil
}

func encodeVarintWorker(dAtA []byte, offset int, v uint64) int {
    offset -= sovWorker(v)
    base := offset
@@ -379,6 +514,10 @@ func (m *WorkerRecord) Size() (n int) {
            n += 1 + l + sovWorker(uint64(l))
        }
    }
    if m.BuildkitVersion != nil {
        l = m.BuildkitVersion.Size()
        n += 1 + l + sovWorker(uint64(l))
    }
    if m.XXX_unrecognized != nil {
        n += len(m.XXX_unrecognized)
    }
@@ -412,6 +551,30 @@ func (m *GCPolicy) Size() (n int) {
    return n
}

func (m *BuildkitVersion) Size() (n int) {
    if m == nil {
        return 0
    }
    var l int
    _ = l
    l = len(m.Package)
    if l > 0 {
        n += 1 + l + sovWorker(uint64(l))
    }
    l = len(m.Version)
    if l > 0 {
        n += 1 + l + sovWorker(uint64(l))
    }
    l = len(m.Revision)
    if l > 0 {
        n += 1 + l + sovWorker(uint64(l))
    }
    if m.XXX_unrecognized != nil {
        n += len(m.XXX_unrecognized)
    }
    return n
}

func sovWorker(x uint64) (n int) {
    return (math_bits.Len64(x|1) + 6) / 7
}
@@ -674,6 +837,42 @@ func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
                return err
            }
            iNdEx = postIndex
        case 5:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field BuildkitVersion", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowWorker
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                msglen |= int(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if msglen < 0 {
                return ErrInvalidLengthWorker
            }
            postIndex := iNdEx + msglen
            if postIndex < 0 {
                return ErrInvalidLengthWorker
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            if m.BuildkitVersion == nil {
                m.BuildkitVersion = &BuildkitVersion{}
            }
            if err := m.BuildkitVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
        default:
            iNdEx = preIndex
            skippy, err := skipWorker(dAtA[iNdEx:])
@@ -837,6 +1036,153 @@ func (m *GCPolicy) Unmarshal(dAtA []byte) error {
    }
    return nil
}
func (m *BuildkitVersion) Unmarshal(dAtA []byte) error {
    l := len(dAtA)
    iNdEx := 0
    for iNdEx < l {
        preIndex := iNdEx
        var wire uint64
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return ErrIntOverflowWorker
            }
            if iNdEx >= l {
                return io.ErrUnexpectedEOF
            }
            b := dAtA[iNdEx]
            iNdEx++
            wire |= uint64(b&0x7F) << shift
            if b < 0x80 {
                break
            }
        }
        fieldNum := int32(wire >> 3)
        wireType := int(wire & 0x7)
        if wireType == 4 {
            return fmt.Errorf("proto: BuildkitVersion: wiretype end group for non-group")
        }
        if fieldNum <= 0 {
            return fmt.Errorf("proto: BuildkitVersion: illegal tag %d (wire type %d)", fieldNum, wire)
        }
        switch fieldNum {
        case 1:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Package", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowWorker
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= uint64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthWorker
            }
            postIndex := iNdEx + intStringLen
            if postIndex < 0 {
                return ErrInvalidLengthWorker
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Package = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 2:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowWorker
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= uint64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthWorker
            }
            postIndex := iNdEx + intStringLen
            if postIndex < 0 {
                return ErrInvalidLengthWorker
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Version = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 3:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowWorker
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= uint64(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthWorker
            }
            postIndex := iNdEx + intStringLen
            if postIndex < 0 {
                return ErrInvalidLengthWorker
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Revision = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        default:
            iNdEx = preIndex
            skippy, err := skipWorker(dAtA[iNdEx:])
            if err != nil {
                return err
            }
            if (skippy < 0) || (iNdEx+skippy) < 0 {
                return ErrInvalidLengthWorker
            }
            if (iNdEx + skippy) > l {
                return io.ErrUnexpectedEOF
            }
            m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
            iNdEx += skippy
        }
    }

    if iNdEx > l {
        return io.ErrUnexpectedEOF
    }
    return nil
}
func skipWorker(dAtA []byte) (n int, err error) {
    l := len(dAtA)
    iNdEx := 0

7
vendor/github.com/moby/buildkit/api/types/worker.proto
generated
vendored
@@ -14,6 +14,7 @@ message WorkerRecord {
    map<string, string> Labels = 2;
    repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false];
    repeated GCPolicy GCPolicy = 4;
    BuildkitVersion BuildkitVersion = 5;
}

message GCPolicy {
@@ -22,3 +23,9 @@ message GCPolicy {
    int64 keepBytes = 3;
    repeated string filters = 4;
}

message BuildkitVersion {
    string package = 1;
    string version = 2;
    string revision = 3;
}
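
Editor's note (not part of the diff): the new BuildkitVersion message lets a worker record advertise which BuildKit build it runs. A minimal Go sketch of reading it on the client side, assuming rec is a WorkerRecord from a ListWorkers response (the surrounding wiring is hypothetical; the nil-safe getters are the generated ones shown above):

    // Print the BuildKit version a worker advertises.
    if v := rec.GetBuildkitVersion(); v != nil {
        fmt.Printf("worker runs %s %s (rev %s)\n", v.GetPackage(), v.GetVersion(), v.GetRevision())
    }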

77
vendor/github.com/moby/buildkit/cache/blobs.go
generated
vendored
@@ -1,19 +1,15 @@
package cache

import (
    "compress/gzip"
    "context"
    "fmt"
    "io"
    "os"
    "strconv"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/diff"
    "github.com/containerd/containerd/diff/walking"
    "github.com/containerd/containerd/leases"
    "github.com/containerd/containerd/mount"
    "github.com/klauspost/compress/zstd"
    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/util/compression"
    "github.com/moby/buildkit/util/flightcontrol"
@@ -40,6 +36,14 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo
    if _, ok := leases.FromContext(ctx); !ok {
        return errors.Errorf("missing lease requirement for computeBlobChain")
    }
    if !createIfNeeded {
        sr.mu.Lock()
        if sr.equalMutable != nil {
            sr.mu.Unlock()
            return nil
        }
        sr.mu.Unlock()
    }

    if err := sr.Finalize(ctx); err != nil {
        return err
@@ -57,8 +61,6 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo
    return computeBlobChain(ctx, sr, createIfNeeded, comp, s, filter)
}

type compressor func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error)

func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, comp compression.Config, s session.Group, filter map[string]struct{}) error {
    eg, ctx := errgroup.WithContext(ctx)
    switch sr.kind() {
@@ -92,28 +94,8 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
    return nil, errors.WithStack(ErrNoBlobs)
}

var mediaType string
var compressorFunc compressor
var finalize func(context.Context, content.Store) (map[string]string, error)
switch comp.Type {
case compression.Uncompressed:
    mediaType = ocispecs.MediaTypeImageLayer
case compression.Gzip:
    compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) {
        return gzipWriter(comp)(dest)
    }
    mediaType = ocispecs.MediaTypeImageLayerGzip
case compression.EStargz:
    compressorFunc, finalize = compressEStargz(comp)
    mediaType = ocispecs.MediaTypeImageLayerGzip
case compression.Zstd:
    compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) {
        return zstdWriter(comp)(dest)
    }
    mediaType = ocispecs.MediaTypeImageLayer + "+zstd"
default:
    return nil, errors.Errorf("unknown layer compression type: %q", comp.Type)
}
compressorFunc, finalize := comp.Type.Compress(ctx, comp)
mediaType := comp.Type.MediaType()

var lowerRef *immutableRef
switch sr.kind() {
@@ -206,7 +188,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
        }
    }

    if desc.Digest == "" && !isTypeWindows(sr) && (comp.Type == compression.Zstd || comp.Type == compression.EStargz) {
    if desc.Digest == "" && !isTypeWindows(sr) && comp.Type.NeedsComputeDiffBySelf() {
        // These compression types aren't supported by containerd differ. So try to compute diff on buildkit side.
        // This case can happen on a containerd worker + non-overlayfs snapshotter (e.g. native).
        // See also: https://github.com/containerd/containerd/issues/4263
@@ -433,7 +415,7 @@ func isTypeWindows(sr *immutableRef) bool {

// ensureCompression ensures the specified ref has the blob of the specified compression Type.
func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error {
    _, err := g.Do(ctx, fmt.Sprintf("%s-%d", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) {
    _, err := g.Do(ctx, fmt.Sprintf("%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) {
        desc, err := ref.ociDesc(ctx, ref.descHandlers, true)
        if err != nil {
            return nil, err
@@ -480,38 +462,3 @@ func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.
    })
    return err
}

func gzipWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) {
    return func(dest io.Writer) (io.WriteCloser, error) {
        level := gzip.DefaultCompression
        if comp.Level != nil {
            level = *comp.Level
        }
        return gzip.NewWriterLevel(dest, level)
    }
}

func zstdWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) {
    return func(dest io.Writer) (io.WriteCloser, error) {
        level := zstd.SpeedDefault
        if comp.Level != nil {
            level = toZstdEncoderLevel(*comp.Level)
        }
        return zstd.NewWriter(dest, zstd.WithEncoderLevel(level))
    }
}

func toZstdEncoderLevel(level int) zstd.EncoderLevel {
    // map zstd compression levels to go-zstd levels
    // once we also have c based implementation move this to helper pkg
    if level < 0 {
        return zstd.SpeedDefault
    } else if level < 3 {
        return zstd.SpeedFastest
    } else if level < 7 {
        return zstd.SpeedDefault
    } else if level < 9 {
        return zstd.SpeedBetterCompression
    }
    return zstd.SpeedBestCompression
}
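
Editor's note (not part of the diff): the per-type switch that used to live in computeBlobChain is replaced by methods on compression.Type itself. Inferred from the call sites in this file, the vendored type's method set looks roughly like the Go sketch below; the exact interface shape is an assumption, only the method names and rough signatures are taken from the diff:

    // Reconstructed from call sites; not the vendored declaration.
    type Type interface {
        Compress(ctx context.Context, comp Config) (Compressor, Finalizer)
        MediaType() string            // replaces the per-case mediaType assignments
        NeedsComputeDiffBySelf() bool // true for types containerd's differ can't produce
        NeedsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (bool, error)
    }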

12
vendor/github.com/moby/buildkit/cache/blobs_linux.go
generated
vendored
@@ -12,6 +12,7 @@ import (
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/mount"
    "github.com/moby/buildkit/util/bklog"
    "github.com/moby/buildkit/util/compression"
    "github.com/moby/buildkit/util/overlay"
    digest "github.com/opencontainers/go-digest"
    ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@@ -24,7 +25,7 @@ var emptyDesc = ocispecs.Descriptor{}
// diff between lower and upper snapshot. If the passed mounts cannot
// be computed (e.g. because the mounts aren't overlayfs), it returns
// an error.
func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) {
func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor) (_ ocispecs.Descriptor, ok bool, err error) {
    // Get upperdir location if mounts are overlayfs that can be processed by this differ.
    upperdir, err := overlay.GetUpperdir(lower, upper)
    if err != nil {
@@ -57,11 +58,14 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
    if err != nil {
        return emptyDesc, false, errors.Wrap(err, "failed to get compressed stream")
    }
    err = overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower)
    compressed.Close()
    if err != nil {
    // Close ensures compressorFunc performs its finalization work.
    defer compressed.Close()
    if err := overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower); err != nil {
        return emptyDesc, false, errors.Wrap(err, "failed to write compressed diff")
    }
    if err := compressed.Close(); err != nil {
        return emptyDesc, false, errors.Wrap(err, "failed to close compressed diff writer")
    }
    if labels == nil {
        labels = map[string]string{}
    }

3
vendor/github.com/moby/buildkit/cache/blobs_nolinux.go
generated
vendored
@@ -6,11 +6,12 @@ package cache
import (
    "context"

    "github.com/moby/buildkit/util/compression"
    "github.com/containerd/containerd/mount"
    ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) {
func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor) (_ ocispecs.Descriptor, ok bool, err error) {
    return ocispecs.Descriptor{}, true, errors.Errorf("overlayfs-based diff computing is unsupported")
}

16
vendor/github.com/moby/buildkit/cache/compression.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
//go:build !nydus
// +build !nydus

package cache

import (
    "context"

    "github.com/containerd/containerd/content"
    "github.com/moby/buildkit/cache/config"
    ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool {
    return refCfg.Compression.Force
}
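
Editor's note (not part of the diff): this file and compression_nydus.go below are build-tag twins, so exactly one needsForceCompression is compiled in. For clarity, the nydus variant's decision reduced to a pure Go function (a sketch mirroring the logic shown below, not code from the commit):

    func forceNeeded(force, sourceIsNydus, targetIsNydus bool) bool {
        if force {
            return true // an explicit force request always wins
        }
        if targetIsNydus {
            return !sourceIsNydus // converting into nydus from another type
        }
        return sourceIsNydus // nydus blobs can't be reused for other types
    }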

147
vendor/github.com/moby/buildkit/cache/compression_nydus.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
//go:build nydus
// +build nydus

package cache

import (
    "compress/gzip"
    "context"
    "encoding/json"
    "io"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/moby/buildkit/cache/config"
    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/util/compression"
    digest "github.com/opencontainers/go-digest"
    ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"

    nydusify "github.com/containerd/nydus-snapshotter/pkg/converter"
)

func init() {
    additionalAnnotations = append(
        additionalAnnotations,
        nydusify.LayerAnnotationNydusBlob, nydusify.LayerAnnotationNydusBootstrap, nydusify.LayerAnnotationNydusBlobIDs,
    )
}

// Nydus compression type can't be mixed with other compression types in the same image,
// so if `source` is this kind of layer, but the target is other compression type, we
// should do the forced compression.
func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool {
    if refCfg.Compression.Force {
        return true
    }
    isNydusBlob, _ := compression.Nydus.Is(ctx, cs, source)
    if refCfg.Compression.Type == compression.Nydus {
        return !isNydusBlob
    }
    return isNydusBlob
}

// MergeNydus does two steps:
// 1. Extracts nydus bootstrap from nydus format (nydus blob + nydus bootstrap) for each layer.
// 2. Merge all nydus bootstraps into a final bootstrap (which will be added as an extra layer).
// The nydus bootstrap size is very small, so the merge operation is fast.
func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, s session.Group) (*ocispecs.Descriptor, error) {
    iref, ok := ref.(*immutableRef)
    if !ok {
        return nil, errors.Errorf("unsupported ref type %T", ref)
    }
    refs := iref.layerChain()
    if len(refs) == 0 {
        return nil, errors.Errorf("refs can't be empty")
    }

    // Extracts nydus bootstrap from nydus format for each layer.
    var cm *cacheManager
    layers := []nydusify.Layer{}
    blobIDs := []string{}
    for _, ref := range refs {
        blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s)
        if err != nil {
            return nil, errors.Wrapf(err, "get compression blob %q", comp.Type)
        }
        ra, err := ref.cm.ContentStore.ReaderAt(ctx, blobDesc)
        if err != nil {
            return nil, errors.Wrapf(err, "get reader for compression blob %q", comp.Type)
        }
        defer ra.Close()
        if cm == nil {
            cm = ref.cm
        }
        blobIDs = append(blobIDs, blobDesc.Digest.Hex())
        layers = append(layers, nydusify.Layer{
            Digest:   blobDesc.Digest,
            ReaderAt: ra,
        })
    }

    // Merge all nydus bootstraps into a final nydus bootstrap.
    pr, pw := io.Pipe()
    go func() {
        defer pw.Close()
        if _, err := nydusify.Merge(ctx, layers, pw, nydusify.MergeOption{
            WithTar: true,
        }); err != nil {
            pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap"))
        }
    }()

    // Compress final nydus bootstrap to tar.gz and write into content store.
    cw, err := content.OpenWriter(ctx, cm.ContentStore, content.WithRef("nydus-merge-"+iref.getChainID().String()))
    if err != nil {
        return nil, errors.Wrap(err, "open content store writer")
    }
    defer cw.Close()

    gw := gzip.NewWriter(cw)
    uncompressedDgst := digest.SHA256.Digester()
    compressed := io.MultiWriter(gw, uncompressedDgst.Hash())
    if _, err := io.Copy(compressed, pr); err != nil {
        return nil, errors.Wrapf(err, "copy bootstrap targz into content store")
    }
    if err := gw.Close(); err != nil {
        return nil, errors.Wrap(err, "close gzip writer")
    }

    compressedDgst := cw.Digest()
    if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{
        containerdUncompressed: uncompressedDgst.Digest().String(),
    })); err != nil {
        if !errdefs.IsAlreadyExists(err) {
            return nil, errors.Wrap(err, "commit to content store")
        }
    }
    if err := cw.Close(); err != nil {
        return nil, errors.Wrap(err, "close content store writer")
    }

    info, err := cm.ContentStore.Info(ctx, compressedDgst)
    if err != nil {
        return nil, errors.Wrap(err, "get info from content store")
    }

    blobIDsBytes, err := json.Marshal(blobIDs)
    if err != nil {
        return nil, errors.Wrap(err, "marshal blob ids")
    }

    desc := ocispecs.Descriptor{
        Digest:    compressedDgst,
        Size:      info.Size,
        MediaType: ocispecs.MediaTypeImageLayerGzip,
        Annotations: map[string]string{
            containerdUncompressed: uncompressedDgst.Digest().String(),
            // Use this annotation to identify nydus bootstrap layer.
            nydusify.LayerAnnotationNydusBootstrap: "true",
            // Track all blob digests for nydus snapshotter.
            nydusify.LayerAnnotationNydusBlobIDs: string(blobIDsBytes),
        },
    }

    return &desc, nil
}
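
Editor's note (not part of the diff): MergeNydus streams the merged bootstrap through a pipe so the gzip writer and the uncompressed-digest hasher see the same bytes without buffering the whole bootstrap in memory. The pattern in isolation, as a simplified Go sketch with error handling elided:

    pr, pw := io.Pipe()
    go func() {
        defer pw.Close()
        // producer writes the merged bootstrap tar to pw (nydusify.Merge above)
    }()
    gw := gzip.NewWriter(cw) // cw: content-store writer, receives compressed bytes
    uncompressed := digest.SHA256.Digester()
    _, _ = io.Copy(io.MultiWriter(gw, uncompressed.Hash()), pr)
    _ = gw.Close() // flush before committing and labeling with uncompressed.Digest()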

38
vendor/github.com/moby/buildkit/cache/contenthash/checksum.go
generated
vendored
@@ -11,13 +11,13 @@ import (
    "strings"
    "sync"

    "github.com/docker/docker/pkg/fileutils"
    iradix "github.com/hashicorp/go-immutable-radix"
    "github.com/hashicorp/golang-lru/simplelru"
    "github.com/moby/buildkit/cache"
    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/snapshot"
    "github.com/moby/locker"
    "github.com/moby/patternmatcher"
    digest "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/tonistiigi/fsutil"
@@ -79,8 +79,8 @@ type includedPath struct {
    path string
    record *CacheRecord
    included bool
    includeMatchInfo fileutils.MatchInfo
    excludeMatchInfo fileutils.MatchInfo
    includeMatchInfo patternmatcher.MatchInfo
    excludeMatchInfo patternmatcher.MatchInfo
}

type cacheManager struct {
@@ -496,17 +496,17 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
    endsInSep := len(p) != 0 && p[len(p)-1] == filepath.Separator
    p = keyPath(p)

    var includePatternMatcher *fileutils.PatternMatcher
    var includePatternMatcher *patternmatcher.PatternMatcher
    if len(opts.IncludePatterns) != 0 {
        includePatternMatcher, err = fileutils.NewPatternMatcher(opts.IncludePatterns)
        includePatternMatcher, err = patternmatcher.New(opts.IncludePatterns)
        if err != nil {
            return nil, errors.Wrapf(err, "invalid includepatterns: %s", opts.IncludePatterns)
        }
    }

    var excludePatternMatcher *fileutils.PatternMatcher
    var excludePatternMatcher *patternmatcher.PatternMatcher
    if len(opts.ExcludePatterns) != 0 {
        excludePatternMatcher, err = fileutils.NewPatternMatcher(opts.ExcludePatterns)
        excludePatternMatcher, err = patternmatcher.New(opts.ExcludePatterns)
        if err != nil {
            return nil, errors.Wrapf(err, "invalid excludepatterns: %s", opts.ExcludePatterns)
        }
@@ -695,21 +695,21 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o

func shouldIncludePath(
    candidate string,
    includePatternMatcher *fileutils.PatternMatcher,
    excludePatternMatcher *fileutils.PatternMatcher,
    includePatternMatcher *patternmatcher.PatternMatcher,
    excludePatternMatcher *patternmatcher.PatternMatcher,
    maybeIncludedPath *includedPath,
    parentDir *includedPath,
) (bool, error) {
    var (
        m bool
        matchInfo fileutils.MatchInfo
        matchInfo patternmatcher.MatchInfo
        err error
    )
    if includePatternMatcher != nil {
        if parentDir != nil {
            m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, parentDir.includeMatchInfo)
        } else {
            m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, fileutils.MatchInfo{})
            m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, patternmatcher.MatchInfo{})
        }
        if err != nil {
            return false, errors.Wrap(err, "failed to match includepatterns")
@@ -724,7 +724,7 @@ func shouldIncludePath(
        if parentDir != nil {
            m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, parentDir.excludeMatchInfo)
        } else {
            m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, fileutils.MatchInfo{})
            m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, patternmatcher.MatchInfo{})
        }
        if err != nil {
            return false, errors.Wrap(err, "failed to match excludepatterns")
@@ -799,7 +799,7 @@ func splitWildcards(p string) (d1, d2 string) {
            p2 = append(p2, p)
        }
    }
    return filepath.Join(p1...), filepath.Join(p2...)
    return path.Join(p1...), path.Join(p2...)
}

func containsWildcards(name string) bool {
@@ -1015,7 +1015,7 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
            Type: CacheRecordTypeSymlink,
            Linkname: filepath.ToSlash(link),
        }
        k := []byte(filepath.Join("/", filepath.ToSlash(p)))
        k := []byte(path.Join("/", filepath.ToSlash(p)))
        k = convertPathToKey(k)
        txn.Insert(k, cr)
        return nil
@@ -1024,15 +1024,15 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
        return err
    }

    err = filepath.Walk(parentPath, func(path string, fi os.FileInfo, err error) error {
    err = filepath.Walk(parentPath, func(itemPath string, fi os.FileInfo, err error) error {
        if err != nil {
            return errors.Wrapf(err, "failed to walk %s", path)
            return errors.Wrapf(err, "failed to walk %s", itemPath)
        }
        rel, err := filepath.Rel(mp, path)
        rel, err := filepath.Rel(mp, itemPath)
        if err != nil {
            return err
        }
        k := []byte(filepath.Join("/", filepath.ToSlash(rel)))
        k := []byte(path.Join("/", filepath.ToSlash(rel)))
        if string(k) == "/" {
            k = []byte{}
        }
@@ -1043,7 +1043,7 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
        }
        if fi.Mode()&os.ModeSymlink != 0 {
            cr.Type = CacheRecordTypeSymlink
            link, err := os.Readlink(path)
            link, err := os.Readlink(itemPath)
            if err != nil {
                return err
            }
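
Editor's note (not part of the diff): this file swaps the deprecated docker/docker/pkg/fileutils matcher for github.com/moby/patternmatcher, which is API-compatible at these call sites. Usage as seen above, in a hedged Go sketch (the patterns are made-up examples):

    pm, err := patternmatcher.New([]string{"src/**", "!src/vendor"})
    if err != nil {
        return err
    }
    // Start from an empty MatchInfo at the root; when walking into a
    // directory, pass the parent's MatchInfo back in, as includedPaths does.
    matched, info, err := pm.MatchesUsingParentResults("src/main.go", patternmatcher.MatchInfo{})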

2
vendor/github.com/moby/buildkit/cache/contenthash/filehash.go
generated
vendored
@@ -51,6 +51,8 @@ func NewFromStat(stat *fstypes.Stat) (hash.Hash, error) {
    hdr.Name = "" // note: empty name is different from the current hash in docker build. Name is added on recursive directory scan instead
    hdr.Devmajor = stat.Devmajor
    hdr.Devminor = stat.Devminor
    hdr.Uid = int(stat.Uid)
    hdr.Gid = int(stat.Gid)

    if len(stat.Xattrs) > 0 {
        hdr.PAXRecords = make(map[string]string, len(stat.Xattrs))

4
vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go
generated
vendored
@@ -37,10 +37,10 @@ func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {

func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
    pax := h.PAXRecords
    if len(h.Xattrs) > 0 { //nolint deprecated
    if len(h.Xattrs) > 0 { //nolint:staticcheck // field deprecated in stdlib
        if pax == nil {
            pax = map[string]string{}
            for k, v := range h.Xattrs { //nolint deprecated
            for k, v := range h.Xattrs { //nolint:staticcheck // field deprecated in stdlib
                pax["SCHILY.xattr."+k] = v
            }
        }

150
vendor/github.com/moby/buildkit/cache/converter.go
generated
vendored
@@ -7,120 +7,46 @@ import (
    "io"
    "sync"

    cdcompression "github.com/containerd/containerd/archive/compression"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/images/converter"
    "github.com/containerd/containerd/labels"
    "github.com/moby/buildkit/identity"
    "github.com/moby/buildkit/util/bklog"
    "github.com/moby/buildkit/util/compression"
    "github.com/moby/buildkit/util/iohelper"
    digest "github.com/opencontainers/go-digest"
    ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

// needsConversion indicates whether a conversion is needed for the specified descriptor to
// be the compressionType.
func needsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (bool, error) {
    mediaType := desc.MediaType
    switch compressionType {
    case compression.Uncompressed:
        if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Uncompressed {
            return false, nil
        }
    case compression.Gzip:
        esgz, err := isEStargz(ctx, cs, desc.Digest)
        if err != nil {
            return false, err
        }
        if (!images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Gzip) && !esgz {
            return false, nil
        }
    case compression.Zstd:
        if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Zstd {
            return false, nil
        }
    case compression.EStargz:
        esgz, err := isEStargz(ctx, cs, desc.Digest)
        if err != nil {
            return false, err
        }
        if !images.IsLayerType(mediaType) || esgz {
            return false, nil
        }
    default:
        return false, fmt.Errorf("unknown compression type during conversion: %q", compressionType)
    }
    return true, nil
}

// getConverter returns converter function according to the specified compression type.
// If no conversion is needed, this returns nil without error.
func getConverter(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, comp compression.Config) (converter.ConvertFunc, error) {
    if needs, err := needsConversion(ctx, cs, desc, comp.Type); err != nil {
    if needs, err := comp.Type.NeedsConversion(ctx, cs, desc); err != nil {
        return nil, errors.Wrapf(err, "failed to determine conversion needs")
    } else if !needs {
        // No conversion. No need to return an error here.
        return nil, nil
    }

    from, err := compression.FromMediaType(desc.MediaType)
    if err != nil {
        return nil, err
    }

    c := conversion{target: comp}

    from := compression.FromMediaType(desc.MediaType)
    switch from {
    case compression.Uncompressed:
    case compression.Gzip, compression.Zstd:
        c.decompress = func(ctx context.Context, desc ocispecs.Descriptor) (r io.ReadCloser, err error) {
            ra, err := cs.ReaderAt(ctx, desc)
            if err != nil {
                return nil, err
            }
            esgz, err := isEStargz(ctx, cs, desc.Digest)
            if err != nil {
                return nil, err
            } else if esgz {
                r, err = decompressEStargz(io.NewSectionReader(ra, 0, ra.Size()))
                if err != nil {
                    return nil, err
                }
            } else {
                r, err = cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size()))
                if err != nil {
                    return nil, err
                }
            }
            return &readCloser{r, ra.Close}, nil
        }
    default:
        return nil, errors.Errorf("unsupported source compression type %q from mediatype %q", from, desc.MediaType)
    }

    switch comp.Type {
    case compression.Uncompressed:
    case compression.Gzip:
        c.compress = gzipWriter(comp)
    case compression.Zstd:
        c.compress = zstdWriter(comp)
    case compression.EStargz:
        compressorFunc, finalize := compressEStargz(comp)
        c.compress = func(w io.Writer) (io.WriteCloser, error) {
            return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip)
        }
        c.finalize = finalize
    default:
        return nil, errors.Errorf("unknown target compression type during conversion: %q", comp.Type)
    }
    c.compress, c.finalize = comp.Type.Compress(ctx, comp)
    c.decompress = from.Decompress

    return (&c).convert, nil
}

type conversion struct {
    target compression.Config
    decompress func(context.Context, ocispecs.Descriptor) (io.ReadCloser, error)
    compress func(w io.Writer) (io.WriteCloser, error)
    finalize func(context.Context, content.Store) (map[string]string, error)
    decompress compression.Decompressor
    compress compression.Compressor
    finalize compression.Finalizer
}

var bufioPool = sync.Pool{
@@ -151,34 +77,20 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec
        bufW = bufio.NewWriterSize(w, 128*1024)
    }
    defer bufioPool.Put(bufW)
    var zw io.WriteCloser = &nopWriteCloser{bufW}
    if c.compress != nil {
        zw, err = c.compress(zw)
        if err != nil {
            return nil, err
        }
    zw, err := c.compress(&iohelper.NopWriteCloser{Writer: bufW}, c.target.Type.MediaType())
    if err != nil {
        return nil, err
    }
    zw = &onceWriteCloser{WriteCloser: zw}
    defer zw.Close()

    // convert this layer
    diffID := digest.Canonical.Digester()
    var rdr io.Reader
    if c.decompress == nil {
        ra, err := cs.ReaderAt(ctx, desc)
        if err != nil {
            return nil, err
        }
        defer ra.Close()
        rdr = io.NewSectionReader(ra, 0, ra.Size())
    } else {
        rc, err := c.decompress(ctx, desc)
        if err != nil {
            return nil, err
        }
        defer rc.Close()
        rdr = rc
    rdr, err := c.decompress(ctx, cs, desc)
    if err != nil {
        return nil, err
    }
    defer rdr.Close()
    if _, err := io.Copy(zw, io.TeeReader(rdr, diffID.Hash())); err != nil {
        return nil, err
    }
@@ -201,7 +113,7 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec
    }

    newDesc := desc
    newDesc.MediaType = c.target.Type.DefaultMediaType()
    newDesc.MediaType = c.target.Type.MediaType()
    newDesc.Digest = info.Digest
    newDesc.Size = info.Size
    newDesc.Annotations = map[string]string{labels.LabelUncompressed: diffID.Digest().String()}
@@ -217,28 +129,6 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec
    return &newDesc, nil
}

type readCloser struct {
    io.ReadCloser
    closeFunc func() error
}

func (rc *readCloser) Close() error {
    err1 := rc.ReadCloser.Close()
    err2 := rc.closeFunc()
    if err1 != nil {
        return errors.Wrapf(err1, "failed to close: %v", err2)
    }
    return err2
}

type nopWriteCloser struct {
    io.Writer
}

func (w *nopWriteCloser) Close() error {
    return nil
}

type onceWriteCloser struct {
    io.WriteCloser
    closeOnce sync.Once
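
Editor's note (not part of the diff): after this refactor a layer conversion is just three pluggable pieces from the compression package. The data flow inside convert, condensed into a Go sketch (names as in the file above; error handling elided):

    rdr, _ := c.decompress(ctx, cs, desc) // compression.Decompressor
    zw, _ := c.compress(&iohelper.NopWriteCloser{Writer: w},
        c.target.Type.MediaType()) // compression.Compressor
    diffID := digest.Canonical.Digester()
    _, _ = io.Copy(zw, io.TeeReader(rdr, diffID.Hash())) // digest the uncompressed bytes
    _ = zw.Close() // onceWriteCloser makes the later deferred Close a no-op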

90
vendor/github.com/moby/buildkit/cache/filelist.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
package cache

import (
    "archive/tar"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "path"
    "sort"

    cdcompression "github.com/containerd/containerd/archive/compression"
    "github.com/moby/buildkit/session"
)

const keyFileList = "filelist"

// FileList returns an ordered list of files present in the cache record that were
// changed compared to the parent. The paths of the files are in same format as they
// are in the tar stream (AUFS whiteout format). If the reference does not have a
// blob associated with it, the list is empty.
func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string, error) {
    res, err := g.Do(ctx, fmt.Sprintf("filelist-%s", sr.ID()), func(ctx context.Context) (interface{}, error) {
        dt, err := sr.GetExternal(keyFileList)
        if err == nil && dt != nil {
            var files []string
            if err := json.Unmarshal(dt, &files); err != nil {
                return nil, err
            }
            return files, nil
        }

        if sr.getBlob() == "" {
            return nil, nil
        }

        // lazy blobs need to be pulled first
        if err := sr.Extract(ctx, s); err != nil {
            return nil, err
        }

        desc, err := sr.ociDesc(ctx, sr.descHandlers, false)
        if err != nil {
            return nil, err
        }

        ra, err := sr.cm.ContentStore.ReaderAt(ctx, desc)
        if err != nil {
            return nil, err
        }

        r, err := cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size()))
        if err != nil {
            return nil, err
        }
        defer r.Close()

        var files []string

        rdr := tar.NewReader(r)
        for {
            hdr, err := rdr.Next()
            if err == io.EOF {
                break
            }
            if err != nil {
                return nil, err
            }
            name := path.Clean(hdr.Name)
            files = append(files, name)
        }
        sort.Strings(files)

        dt, err = json.Marshal(files)
        if err != nil {
            return nil, err
        }
        if err := sr.SetExternal(keyFileList, dt); err != nil {
            return nil, err
        }
        return files, nil
    })
    if err != nil {
        return nil, err
    }
    if res == nil {
        return nil, nil
    }
    return res.([]string), nil
}
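
Editor's note (not part of the diff): FileList layers two caches — flightcontrol's g.Do deduplicates concurrent computations per ref, and the JSON result is persisted under the "filelist" external key so later calls never reopen the blob. The skeleton of that pattern in Go (sketch; names as in the file above):

    res, err := g.Do(ctx, "filelist-"+sr.ID(), func(ctx context.Context) (interface{}, error) {
        if dt, err := sr.GetExternal(keyFileList); err == nil && dt != nil {
            // fast path: decode and return the persisted result
        }
        // slow path: scan the layer tar, then persist with
        // sr.SetExternal(keyFileList, dt) before returning
        return files, nil
    })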

9
vendor/github.com/moby/buildkit/cache/manager.go
generated
vendored
@@ -301,7 +301,14 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,

    cm.records[id] = rec

    return rec.ref(true, descHandlers, nil), nil
    ref := rec.ref(true, descHandlers, nil)
    if s := unlazySessionOf(opts...); s != nil {
        if err := ref.unlazy(ctx, ref.descHandlers, ref.progress, s, true); err != nil {
            return nil, err
        }
    }

    return ref, nil
}

// init loads all snapshots from metadata state and tries to load the records

4
vendor/github.com/moby/buildkit/cache/metadata.go
generated
vendored
@@ -551,9 +551,7 @@ func (md *cacheMetadata) appendStringSlice(key string, values ...string) error {
    }

    for _, existing := range slice {
        if _, ok := idx[existing]; ok {
            delete(idx, existing)
        }
        delete(idx, existing)
    }

    if len(idx) == 0 {

3
vendor/github.com/moby/buildkit/cache/metadata/metadata.go
generated
vendored
@@ -317,6 +317,9 @@ func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) {
func (s *StorageItem) Commit() error {
    s.qmu.Lock()
    defer s.qmu.Unlock()
    if len(s.queue) == 0 {
        return nil
    }
    return errors.WithStack(s.Update(func(b *bolt.Bucket) error {
        for _, fn := range s.queue {
            if err := fn(b); err != nil {

11
vendor/github.com/moby/buildkit/cache/opts.go
generated
vendored
@@ -36,4 +36,13 @@ func (m NeedsRemoteProviderError) Error() string {
    return fmt.Sprintf("missing descriptor handlers for lazy blobs %+v", []digest.Digest(m))
}

type ProgressKey struct{}
type Unlazy session.Group

func unlazySessionOf(opts ...RefOption) session.Group {
    for _, opt := range opts {
        if opt, ok := opt.(session.Group); ok {
            return opt
        }
    }
    return nil
}

28
vendor/github.com/moby/buildkit/cache/refs.go
generated
vendored
@@ -3,7 +3,6 @@ package cache
import (
    "context"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "strings"
@@ -37,6 +36,8 @@ import (
    "golang.org/x/sync/errgroup"
)

var additionalAnnotations = append(compression.EStargzAnnotations, containerdUncompressed)

// Ref is a reference to cacheable objects.
type Ref interface {
    Mountable
@@ -56,6 +57,7 @@ type ImmutableRef interface {
    Extract(ctx context.Context, s session.Group) error // +progress
    GetRemotes(ctx context.Context, createIfNeeded bool, cfg config.RefConfig, all bool, s session.Group) ([]*solver.Remote, error)
    LayerChain() RefList
    FileList(ctx context.Context, s session.Group) ([]string, error)
}

type MutableRef interface {
@@ -768,12 +770,9 @@ func (sr *immutableRef) getBlobWithCompression(ctx context.Context, compressionT
}

func getBlobWithCompression(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (ocispecs.Descriptor, error) {
    if compressionType == compression.UnknownCompression {
        return ocispecs.Descriptor{}, fmt.Errorf("cannot get unknown compression type")
    }
    var target *ocispecs.Descriptor
    if err := walkBlob(ctx, cs, desc, func(desc ocispecs.Descriptor) bool {
        if needs, err := needsConversion(ctx, cs, desc, compressionType); err == nil && !needs {
        if needs, err := compressionType.NeedsConversion(ctx, cs, desc); err == nil && !needs {
            target = &desc
            return false
        }
@@ -838,11 +837,11 @@ func getBlobDesc(ctx context.Context, cs content.Store, dgst digest.Digest) (oci
        return ocispecs.Descriptor{}, err
    }
    if info.Labels == nil {
        return ocispecs.Descriptor{}, fmt.Errorf("no blob metadata is stored for %q", info.Digest)
        return ocispecs.Descriptor{}, errors.Errorf("no blob metadata is stored for %q", info.Digest)
    }
    mt, ok := info.Labels[blobMediaTypeLabel]
    if !ok {
        return ocispecs.Descriptor{}, fmt.Errorf("no media type is stored for %q", info.Digest)
        return ocispecs.Descriptor{}, errors.Errorf("no media type is stored for %q", info.Digest)
    }
    desc := ocispecs.Descriptor{
        Digest: info.Digest,
@@ -882,7 +881,7 @@ func filterAnnotationsForSave(a map[string]string) (b map[string]string) {
    if a == nil {
        return nil
    }
    for _, k := range append(eStargzAnnotations, containerdUncompressed) {
    for _, k := range additionalAnnotations {
        v, ok := a[k]
        if !ok {
            continue
@@ -1552,12 +1551,12 @@ func readonlyOverlay(opt []string) []string {
func newSharableMountPool(tmpdirRoot string) (sharableMountPool, error) {
    if tmpdirRoot != "" {
        if err := os.MkdirAll(tmpdirRoot, 0700); err != nil {
            return sharableMountPool{}, fmt.Errorf("failed to prepare mount pool: %w", err)
            return sharableMountPool{}, errors.Wrap(err, "failed to prepare mount pool")
        }
        // If tmpdirRoot is specified, remove existing mounts to avoid conflict.
        files, err := os.ReadDir(tmpdirRoot)
        if err != nil {
            return sharableMountPool{}, fmt.Errorf("failed to read mount pool: %w", err)
            return sharableMountPool{}, errors.Wrap(err, "failed to read mount pool")
        }
        for _, file := range files {
            if file.IsDir() {
@@ -1591,9 +1590,10 @@ func (p sharableMountPool) setSharable(mounts snapshot.Mountable) snapshot.Mount
// This is useful to share writable overlayfs mounts.
//
// NOTE: Mount() method doesn't return the underlying mount configuration (e.g. overlayfs mounts)
// instead it always return bind mounts of the temporary mount point. So if the caller
// needs to inspect the underlying mount configuration (e.g. for optimized differ for
// overlayfs), this wrapper shouldn't be used.
//
// instead it always return bind mounts of the temporary mount point. So if the caller
// needs to inspect the underlying mount configuration (e.g. for optimized differ for
// overlayfs), this wrapper shouldn't be used.
type sharableMountable struct {
    snapshot.Mountable

@@ -1631,7 +1631,7 @@ func (sm *sharableMountable) Mount() (_ []mount.Mount, _ func() error, retErr er
        // Don't need temporary mount wrapper for non-overlayfs mounts
        return mounts, release, nil
    }
    dir, err := ioutil.TempDir(sm.mountPoolRoot, "buildkit")
    dir, err := os.MkdirTemp(sm.mountPoolRoot, "buildkit")
    if err != nil {
        return nil, nil, err
    }

4
vendor/github.com/moby/buildkit/cache/remote.go
generated
vendored
@@ -212,8 +212,8 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refC
        }
    }

    if refCfg.Compression.Force {
        if needs, err := needsConversion(ctx, sr.cm.ContentStore, desc, refCfg.Compression.Type); err != nil {
    if needsForceCompression(ctx, sr.cm.ContentStore, desc, refCfg) {
        if needs, err := refCfg.Compression.Type.NeedsConversion(ctx, sr.cm.ContentStore, desc); err != nil {
            return nil, err
        } else if needs {
            // ensure the compression type.

29
vendor/github.com/moby/buildkit/cache/remotecache/export.go
generated
vendored
@@ -5,7 +5,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"time"

 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
@@ -24,24 +23,10 @@ import (

 type ResolveCacheExporterFunc func(ctx context.Context, g session.Group, attrs map[string]string) (Exporter, error)

-func oneOffProgress(ctx context.Context, id string) func(err error) error {
-	pw, _, _ := progress.NewFromContext(ctx)
-	now := time.Now()
-	st := progress.Status{
-		Started: &now,
-	}
-	pw.Write(id, st)
-	return func(err error) error {
-		now := time.Now()
-		st.Completed = &now
-		pw.Write(id, st)
-		pw.Close()
-		return err
-	}
-}
-
 type Exporter interface {
 	solver.CacheExporterTarget
 	// Name uniquely identifies the exporter
 	Name() string
 	// Finalize finalizes and return metadata that are returned to the client
 	// e.g. ExporterResponseManifestDesc
 	Finalize(ctx context.Context) (map[string]string, error)
@@ -72,6 +57,10 @@ func NewExporter(ingester content.Ingester, ref string, oci bool, compressionCon
 	return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, ref: ref, comp: compressionConfig}
 }

+func (ce *contentCacheExporter) Name() string {
+	return "exporting content cache"
+}
+
 func (ce *contentCacheExporter) Config() Config {
 	return Config{
 		Compression: ce.comp,
@@ -107,7 +96,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
 		if !ok {
 			return nil, errors.Errorf("missing blob %s", l.Blob)
 		}
-		layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob))
+		layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob))
 		if err := contentutil.Copy(ctx, ce.ingester, dgstPair.Provider, dgstPair.Descriptor, ce.ref, logs.LoggerFromContext(ctx)); err != nil {
 			return nil, layerDone(errors.Wrap(err, "error writing layer blob"))
 		}
@@ -127,7 +116,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
 		Size:      int64(len(dt)),
 		MediaType: v1.CacheConfigMediaTypeV0,
 	}
-	configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst))
+	configDone := progress.OneOff(ctx, fmt.Sprintf("writing config %s", dgst))
 	if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
 		return nil, configDone(errors.Wrap(err, "error writing config blob"))
 	}
@@ -146,7 +135,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
 		Size:      int64(len(dt)),
 		MediaType: mfst.MediaType,
 	}
-	mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst))
+	mfstDone := progress.OneOff(ctx, fmt.Sprintf("writing manifest %s", dgst))
 	if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
 		return nil, mfstDone(errors.Wrap(err, "error writing manifest blob"))
 	}
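The per-package oneOffProgress helper removed above is replaced by the shared progress.OneOff, which opens a progress record and returns a completion callback. A small sketch of the call pattern, assuming a context wired to a buildkit progress writer:

package main

import (
    "context"
    "fmt"

    "github.com/moby/buildkit/util/progress"
)

// writeWithProgress mirrors how Finalize wraps each blob copy above: OneOff
// starts a progress record and returns a callback that must be invoked
// exactly once with the result.
func writeWithProgress(ctx context.Context, id string, work func() error) error {
    done := progress.OneOff(ctx, fmt.Sprintf("writing %s", id))
    return done(work())
}

func main() {
    // Without a progress writer attached to the context this is a no-op,
    // which keeps the sketch runnable.
    _ = writeWithProgress(context.Background(), "sha256:deadbeef", func() error {
        return nil // the real work (e.g. contentutil.Copy) goes here
    })
}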
4 vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go generated vendored
@@ -30,6 +30,10 @@ type exporter struct {
 	chains *v1.CacheChains
 }

+func (*exporter) Name() string {
+	return "exporting inline cache"
+}
+
 func (ce *exporter) Config() remotecache.Config {
 	return remotecache.Config{
 		Compression: compression.New(compression.Default),
21 vendor/github.com/moby/buildkit/cache/remotecache/local/local.go generated vendored
@@ -98,15 +98,28 @@ func getContentStore(ctx context.Context, sm *session.Manager, g session.Group,
 	if err != nil {
 		return nil, err
 	}
-	return sessioncontent.NewCallerStore(caller, storeID), nil
+	return &unlazyProvider{sessioncontent.NewCallerStore(caller, storeID), g}, nil
 }

+type unlazyProvider struct {
+	content.Store
+	s session.Group
+}
+
+func (p *unlazyProvider) UnlazySession(desc ocispecs.Descriptor) session.Group {
+	return p.s
+}
+
 func attrsToCompression(attrs map[string]string) (*compression.Config, error) {
-	compressionType := compression.Default
+	var compressionType compression.Type
 	if v, ok := attrs[attrLayerCompression]; ok {
-		if c := compression.Parse(v); c != compression.UnknownCompression {
-			compressionType = c
+		c, err := compression.Parse(v)
+		if err != nil {
+			return nil, err
 		}
+		compressionType = c
+	} else {
+		compressionType = compression.Default
 	}
 	compressionConfig := compression.New(compressionType)
 	if v, ok := attrs[attrForceCompression]; ok {
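compression.Parse now reports unknown compression names as an error instead of returning the old UnknownCompression sentinel, as the hunk above shows. A short sketch of the new shape; the "zstd" value is an example:

package main

import (
    "fmt"

    "github.com/moby/buildkit/util/compression"
)

func main() {
    // Parse returns (Type, error) in v0.11.
    c, err := compression.Parse("zstd")
    if err != nil {
        panic(err)
    }
    cfg := compression.New(c) // wrap the parsed type in a Config, as above
    fmt.Println(cfg.Type)
}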
10 vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go generated vendored
@@ -131,11 +131,15 @@ func (dsl *withDistributionSourceLabel) SnapshotLabels(descs []ocispecs.Descript
 }

 func attrsToCompression(attrs map[string]string) (*compression.Config, error) {
-	compressionType := compression.Default
+	var compressionType compression.Type
 	if v, ok := attrs[attrLayerCompression]; ok {
-		if c := compression.Parse(v); c != compression.UnknownCompression {
-			compressionType = c
+		c, err := compression.Parse(v)
+		if err != nil {
+			return nil, err
 		}
+		compressionType = c
+	} else {
+		compressionType = compression.Default
 	}
 	compressionConfig := compression.New(compressionType)
 	if v, ok := attrs[attrForceCompression]; ok {
2 vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go generated vendored
@@ -276,7 +276,7 @@ func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheR
 		// Any of blobs in the remote must meet the specified compression option.
 		match := false
 		for _, desc := range r.result.Descriptors {
-			m := compressionopts.Type.IsMediaType(desc.MediaType)
+			m := compression.IsMediaType(compressionopts.Type, desc.MediaType)
 			match = match || m
 			if compressionopts.Force && !m {
 				match = false
4 vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go generated vendored
@@ -146,7 +146,7 @@ func (c *item) removeLink(src *item) bool {
 	return found
 }

-func (c *item) AddResult(createdAt time.Time, result *solver.Remote) {
+func (c *item) AddResult(_ digest.Digest, _ int, createdAt time.Time, result *solver.Remote) {
 	c.resultTime = createdAt
 	c.result = result
 }
@@ -214,7 +214,7 @@ func (c *item) walkAllResults(fn func(i *item) error, visited map[*item]struct{}
 type nopRecord struct {
 }

-func (c *nopRecord) AddResult(createdAt time.Time, result *solver.Remote) {
+func (c *nopRecord) AddResult(_ digest.Digest, _ int, createdAt time.Time, result *solver.Remote) {
 }

 func (c *nopRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
18 vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go generated vendored
@@ -1,6 +1,6 @@
 package cacheimport

-// Distibutable build cache
+// Distributable build cache
 //
 // Main manifest is OCI image index
 // https://github.com/opencontainers/image-spec/blob/master/image-index.md .
@@ -13,7 +13,7 @@ package cacheimport
 // Cache config file layout:
 //
 //{
-//  "layers": [
+//  "layers": [ <- layers contains references to blobs
 //   {
 //      "blob": "sha256:deadbeef", <- digest of layer blob in index
 //      "parent": -1 <- index of parent layer, -1 if no parent
@@ -24,20 +24,26 @@ package cacheimport
 //   }
 //  ],
 //
-//  "records": [
+//  "records": [ <- records contains chains of cache keys
 //   {
 //      "digest": "sha256:deadbeef", <- base digest for the record
 //   },
 //   {
 //      "digest": "sha256:deadbeef",
 //      "output": 1, <- optional output index
-//      "layers": [ <- optional array or layer chains
+//      "layers": [ <- optional array of layer pointers
 //        {
 //          "createdat": "",
-//          "layer": 1, <- index to the layer
+//          "layer": 1, <- index to the layers array, layer is loaded with all of its parents
 //        }
 //      ],
-//      "inputs": [ <- dependant records
+//      "chains": [ <- optional array of layer pointer lists
+//        {
+//          "createdat": "",
+//          "layers": [1], <- indexes to the layers array, all layers are loaded in specified order without parents
+//        }
+//      ],
+//      "inputs": [ <- dependant records, this is how cache keys are linked together
 //        [ <- index of the dependency (0)
 //          {
 //            "selector": "sel", <- optional selector
4 vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go generated vendored
@@ -61,7 +61,7 @@ func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.
 			return nil, err
 		}
 		if remote != nil {
-			r.AddResult(res.CreatedAt, remote)
+			r.AddResult("", 0, res.CreatedAt, remote)
 		}
 	}
@@ -86,7 +86,7 @@ func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.
 		}
 		if remote != nil {
 			remote.Provider = mp
-			r.AddResult(res.CreatedAt, remote)
+			r.AddResult("", 0, res.CreatedAt, remote)
 		}
 	}
5 vendor/github.com/moby/buildkit/cache/util/fsutil.go generated vendored
@@ -3,7 +3,6 @@ package util
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"

@@ -59,7 +58,7 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([
 		}

 		if req.Range == nil {
-			dt, err = ioutil.ReadFile(fp)
+			dt, err = os.ReadFile(fp)
 			if err != nil {
 				return errors.WithStack(err)
 			}
@@ -68,7 +67,7 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([
 			if err != nil {
 				return errors.WithStack(err)
 			}
-			dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)))
+			dt, err = io.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)))
 			f.Close()
 			if err != nil {
 				return errors.WithStack(err)
22 vendor/github.com/moby/buildkit/client/build.go generated vendored
@@ -20,17 +20,14 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
 		}
 	}()

-	if opt.Frontend != "" {
-		return nil, errors.New("invalid SolveOpt, Build interface cannot use Frontend")
-	}
+	feOpts := opt.FrontendAttrs
+
+	opt.Frontend = ""

 	if product == "" {
 		product = apicaps.ExportedProduct
 	}

-	feOpts := opt.FrontendAttrs
 	opt.FrontendAttrs = nil

 	workers, err := c.ListWorkers(ctx)
 	if err != nil {
 		return nil, errors.Wrap(err, "listing workers for Build")
@@ -113,6 +110,19 @@ func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.Sta
 	return g.gateway.StatFile(ctx, in, opts...)
 }

+func (g *gatewayClientForBuild) Evaluate(ctx context.Context, in *gatewayapi.EvaluateRequest, opts ...grpc.CallOption) (*gatewayapi.EvaluateResponse, error) {
+	if err := g.caps.Supports(gatewayapi.CapGatewayEvaluate); err != nil {
+		if err2 := g.caps.Supports(gatewayapi.CapStatFile); err2 != nil {
+			return nil, err
+		}
+		ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
+		_, err := g.gateway.StatFile(ctx, &gatewayapi.StatFileRequest{Ref: in.Ref, Path: "."}, opts...)
+		return &gatewayapi.EvaluateResponse{}, err
+	}
+	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
+	return g.gateway.Evaluate(ctx, in, opts...)
+}
+
 func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) {
 	ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
 	return g.gateway.Ping(ctx, in, opts...)
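The new Evaluate RPC is gated on a gateway capability and emulated via StatFile(".") against older daemons, as shown above. A hedged sketch of the capability probe; evaluateSupported is an illustrative helper, not buildkit API:

package example

import (
    gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
    "github.com/moby/buildkit/util/apicaps"
)

// evaluateSupported reports whether the daemon advertises the Evaluate
// capability; when it does not, callers fall back to a StatFile on "."
// to force the build result to be solved.
func evaluateSupported(caps apicaps.CapSet) bool {
    return caps.Supports(gatewayapi.CapGatewayEvaluate) == nil
}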
14 vendor/github.com/moby/buildkit/client/client.go generated vendored
@@ -4,11 +4,12 @@ import (
 	"context"
 	"crypto/tls"
 	"crypto/x509"
-	"io/ioutil"
 	"net"
 	"net/url"
+	"os"
 	"strings"

+	contentapi "github.com/containerd/containerd/api/services/content/v1"
 	"github.com/containerd/containerd/defaults"
 	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	controlapi "github.com/moby/buildkit/api/services/control"
@@ -168,12 +169,16 @@ func (c *Client) setupDelegatedTracing(ctx context.Context, td TracerDelegate) e
 	return td.SetSpanExporter(ctx, e)
 }

-func (c *Client) controlClient() controlapi.ControlClient {
+func (c *Client) ControlClient() controlapi.ControlClient {
 	return controlapi.NewControlClient(c.conn)
 }

+func (c *Client) ContentClient() contentapi.ContentClient {
+	return contentapi.NewContentClient(c.conn)
+}
+
 func (c *Client) Dialer() session.Dialer {
-	return grpchijack.Dialer(c.controlClient())
+	return grpchijack.Dialer(c.ControlClient())
 }

 func (c *Client) Close() error {
@@ -212,7 +217,7 @@ func WithCredentials(serverName, ca, cert, key string) ClientOpt {
 }

 func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
-	ca, err := ioutil.ReadFile(opts.CACert)
+	ca, err := os.ReadFile(opts.CACert)
 	if err != nil {
 		return nil, errors.Wrap(err, "could not read ca certificate")
 	}
@@ -234,7 +239,6 @@ func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
 			return nil, errors.Wrap(err, "could not read certificate/key")
 		}
 		cfg.Certificates = []tls.Certificate{cert}
-		cfg.BuildNameToCertificate()
 	}

 	return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
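controlClient() becomes the exported ControlClient(), and a ContentClient() accessor is added alongside it. A sketch of reaching the raw gRPC services from outside the package; the socket address is an example:

package main

import (
    "context"

    "github.com/moby/buildkit/client"
)

func main() {
    ctx := context.Background()
    c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock") // example address
    if err != nil {
        panic(err)
    }
    defer c.Close()
    // Both raw gRPC services are now reachable by callers.
    _ = c.ControlClient()
    _ = c.ContentClient()
}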
24 vendor/github.com/moby/buildkit/client/diskusage.go generated vendored
@@ -10,18 +10,18 @@ import (
 )

 type UsageInfo struct {
-	ID      string
-	Mutable bool
-	InUse   bool
-	Size    int64
+	ID      string `json:"id"`
+	Mutable bool   `json:"mutable"`
+	InUse   bool   `json:"inUse"`
+	Size    int64  `json:"size"`

-	CreatedAt   time.Time
-	LastUsedAt  *time.Time
-	UsageCount  int
-	Parents     []string
-	Description string
-	RecordType  UsageRecordType
-	Shared      bool
+	CreatedAt   time.Time       `json:"createdAt"`
+	LastUsedAt  *time.Time      `json:"lastUsedAt"`
+	UsageCount  int             `json:"usageCount"`
+	Parents     []string        `json:"parents"`
+	Description string          `json:"description"`
+	RecordType  UsageRecordType `json:"recordType"`
+	Shared      bool            `json:"shared"`
 }

 func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) {
@@ -31,7 +31,7 @@ func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*Usa
 	}

 	req := &controlapi.DiskUsageRequest{Filter: info.Filter}
-	resp, err := c.controlClient().DiskUsage(ctx, req)
+	resp, err := c.ControlClient().DiskUsage(ctx, req)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to call diskusage")
 	}
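UsageInfo gains JSON struct tags, so disk-usage records now marshal with stable camelCase keys. A runnable sketch with made-up values:

package main

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/moby/buildkit/client"
)

func main() {
    u := client.UsageInfo{ID: "abc", InUse: true, Size: 4096, CreatedAt: time.Now()}
    b, err := json.Marshal(u)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(b)) // {"id":"abc","mutable":false,"inUse":true,"size":4096,...}
}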
40 vendor/github.com/moby/buildkit/client/info.go generated vendored Normal file
@@ -0,0 +1,40 @@
+package client
+
+import (
+	"context"
+
+	controlapi "github.com/moby/buildkit/api/services/control"
+	apitypes "github.com/moby/buildkit/api/types"
+	"github.com/pkg/errors"
+)
+
+type Info struct {
+	BuildkitVersion BuildkitVersion `json:"buildkitVersion"`
+}
+
+type BuildkitVersion struct {
+	Package  string `json:"package"`
+	Version  string `json:"version"`
+	Revision string `json:"revision"`
+}
+
+func (c *Client) Info(ctx context.Context) (*Info, error) {
+	res, err := c.ControlClient().Info(ctx, &controlapi.InfoRequest{})
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to call info")
+	}
+	return &Info{
+		BuildkitVersion: fromAPIBuildkitVersion(res.BuildkitVersion),
+	}, nil
+}
+
+func fromAPIBuildkitVersion(in *apitypes.BuildkitVersion) BuildkitVersion {
+	if in == nil {
+		return BuildkitVersion{}
+	}
+	return BuildkitVersion{
+		Package:  in.Package,
+		Version:  in.Version,
+		Revision: in.Revision,
+	}
+}
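The new Info API surfaces the daemon's buildkit version. A sketch of calling it; the address is an example:

package main

import (
    "context"
    "fmt"

    "github.com/moby/buildkit/client"
)

func main() {
    ctx := context.Background()
    c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock") // example address
    if err != nil {
        panic(err)
    }
    defer c.Close()
    info, err := c.Info(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println(info.BuildkitVersion.Package, info.BuildkitVersion.Version, info.BuildkitVersion.Revision)
}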
13 vendor/github.com/moby/buildkit/client/llb/exec.go generated vendored
@@ -192,12 +192,13 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
 	}

 	meta := &pb.Meta{
-		Args:         args,
-		Env:          env.ToArray(),
-		Cwd:          cwd,
-		User:         user,
-		Hostname:     hostname,
-		CgroupParent: cgrpParent,
+		Args:                      args,
+		Env:                       env.ToArray(),
+		Cwd:                       cwd,
+		User:                      user,
+		Hostname:                  hostname,
+		CgroupParent:              cgrpParent,
+		RemoveMountStubsRecursive: true,
 	}

 	extraHosts, err := getExtraHosts(e.base)(ctx, c)
8 vendor/github.com/moby/buildkit/client/llb/marshal.go generated vendored
@@ -2,7 +2,6 @@ package llb

 import (
 	"io"
-	"io/ioutil"

 	"github.com/containerd/containerd/platforms"
 	"github.com/moby/buildkit/solver/pb"
@@ -67,7 +66,7 @@ func WriteTo(def *Definition, w io.Writer) error {
 }

 func ReadFrom(r io.Reader) (*Definition, error) {
-	b, err := ioutil.ReadAll(r)
+	b, err := io.ReadAll(r)
 	if err != nil {
 		return nil, err
 	}
@@ -88,10 +87,7 @@ func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) {
 		c.Platform = p
 	}

-	for _, wc := range override.WorkerConstraints {
-		c.WorkerConstraints = append(c.WorkerConstraints, wc)
-	}
-
+	c.WorkerConstraints = append(c.WorkerConstraints, override.WorkerConstraints...)
 	c.Metadata = mergeMetadata(c.Metadata, override.Metadata)

 	if c.Platform == nil {
22 vendor/github.com/moby/buildkit/client/llb/resolver.go generated vendored
@@ -23,13 +23,35 @@ func ResolveDigest(v bool) ImageOption {
 	})
 }

+func WithLayerLimit(l int) ImageOption {
+	return imageOptionFunc(func(ii *ImageInfo) {
+		ii.layerLimit = &l
+	})
+}
+
 // ImageMetaResolver can resolve image config metadata from a reference
 type ImageMetaResolver interface {
 	ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error)
 }

+type ResolverType int
+
+const (
+	ResolverTypeRegistry ResolverType = iota
+	ResolverTypeOCILayout
+)
+
 type ResolveImageConfigOpt struct {
+	ResolverType
+
 	Platform    *ocispecs.Platform
 	ResolveMode string
 	LogName     string
+
+	Store ResolveImageConfigOptStore
 }
+
+type ResolveImageConfigOptStore struct {
+	SessionID string
+	StoreID   string
+}
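WithLayerLimit is a new ImageOption that caps how many layers of the referenced image are used. A sketch; the image ref and limit are examples:

package main

import (
    "context"
    "fmt"

    "github.com/moby/buildkit/client/llb"
)

func main() {
    // Cap the source image at two layers.
    st := llb.Image("docker.io/library/alpine:3.17", llb.WithLayerLimit(2))
    def, err := st.Marshal(context.Background(), llb.LinuxAmd64)
    if err != nil {
        panic(err)
    }
    fmt.Println(len(def.Def), "LLB ops")
}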
71 vendor/github.com/moby/buildkit/client/llb/source.go generated vendored
@@ -116,6 +116,11 @@ func Image(ref string, opts ...ImageOption) State {
 		attrs[pb.AttrImageRecordType] = info.RecordType
 	}

+	if ll := info.layerLimit; ll != nil {
+		attrs[pb.AttrImageLayerLimit] = strconv.FormatInt(int64(*ll), 10)
+		addCap(&info.Constraints, pb.CapSourceImageLayerLimit)
+	}
+
 	src := NewSource("docker-image://"+ref, attrs, info.Constraints) // controversial
 	if err != nil {
 		src.err = err
@@ -127,8 +132,9 @@ func Image(ref string, opts ...ImageOption) State {
 				p = c.Platform
 			}
 			_, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{
-				Platform:    p,
-				ResolveMode: info.resolveMode.String(),
+				Platform:     p,
+				ResolveMode:  info.resolveMode.String(),
+				ResolverType: ResolverTypeRegistry,
 			})
 			if err != nil {
 				return State{}, err
@@ -142,8 +148,9 @@ func Image(ref string, opts ...ImageOption) State {
 			p = c.Platform
 		}
 		dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{
-			Platform:    p,
-			ResolveMode: info.resolveMode.String(),
+			Platform:     p,
+			ResolveMode:  info.resolveMode.String(),
+			ResolverType: ResolverTypeRegistry,
 		})
 		if err != nil {
 			return State{}, err
@@ -204,6 +211,7 @@ type ImageInfo struct {
 	metaResolver  ImageMetaResolver
 	resolveDigest bool
 	resolveMode   ResolveMode
+	layerLimit    *int
 	RecordType    string
 }

@@ -446,6 +454,59 @@ func Differ(t DiffType, required bool) LocalOption {
 	})
 }

+func OCILayout(ref string, opts ...OCILayoutOption) State {
+	gi := &OCILayoutInfo{}
+
+	for _, o := range opts {
+		o.SetOCILayoutOption(gi)
+	}
+	attrs := map[string]string{}
+	if gi.sessionID != "" {
+		attrs[pb.AttrOCILayoutSessionID] = gi.sessionID
+	}
+	if gi.storeID != "" {
+		attrs[pb.AttrOCILayoutStoreID] = gi.storeID
+	}
+	if gi.layerLimit != nil {
+		attrs[pb.AttrOCILayoutLayerLimit] = strconv.FormatInt(int64(*gi.layerLimit), 10)
+	}
+
+	addCap(&gi.Constraints, pb.CapSourceOCILayout)
+
+	source := NewSource("oci-layout://"+ref, attrs, gi.Constraints)
+	return NewState(source.Output())
+}
+
+type OCILayoutOption interface {
+	SetOCILayoutOption(*OCILayoutInfo)
+}
+
+type ociLayoutOptionFunc func(*OCILayoutInfo)
+
+func (fn ociLayoutOptionFunc) SetOCILayoutOption(li *OCILayoutInfo) {
+	fn(li)
+}
+
+func OCIStore(sessionID string, storeID string) OCILayoutOption {
+	return ociLayoutOptionFunc(func(oi *OCILayoutInfo) {
+		oi.sessionID = sessionID
+		oi.storeID = storeID
+	})
+}
+
+func OCILayerLimit(limit int) OCILayoutOption {
+	return ociLayoutOptionFunc(func(oi *OCILayoutInfo) {
+		oi.layerLimit = &limit
+	})
+}
+
+type OCILayoutInfo struct {
+	constraintsWrapper
+	sessionID  string
+	storeID    string
+	layerLimit *int
+}
+
 type DiffType string

 const (
@@ -549,7 +610,7 @@ func Chown(uid, gid int) HTTPOption {
 }

 func platformSpecificSource(id string) bool {
-	return strings.HasPrefix(id, "docker-image://")
+	return strings.HasPrefix(id, "docker-image://") || strings.HasPrefix(id, "oci-layout://")
 }

 func addCap(c *Constraints, id apicaps.CapID) {
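OCILayout is a new LLB source that reads an image from a client-side OCI layout store; OCIStore and OCILayerLimit configure it. A sketch; the ref, session ID, and store ID are hypothetical and must match a content store attached to the session (see SolveOpt.OCIStores further down):

package main

import (
    "context"
    "fmt"

    "github.com/moby/buildkit/client/llb"
)

func main() {
    st := llb.OCILayout("example/image:latest",
        llb.OCIStore("my-session-id", "my-store"),
        llb.OCILayerLimit(3),
    )
    def, err := st.Marshal(context.Background())
    if err != nil {
        panic(err)
    }
    fmt.Println(len(def.Def), "LLB ops")
}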
2 vendor/github.com/moby/buildkit/client/llb/sourcemap.go generated vendored
@@ -61,7 +61,7 @@ func (smc *sourceMapCollector) Add(dgst digest.Digest, ls []*SourceLocation) {
 		}
 		smc.index[l.SourceMap] = idx
 	}
-	smc.locations[dgst] = ls
+	smc.locations[dgst] = append(smc.locations[dgst], ls...)
 }

 func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt) (*pb.Source, error) {
12 vendor/github.com/moby/buildkit/client/llb/state.go generated vendored
@@ -199,10 +199,10 @@ func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollect
 	if opMeta != nil {
 		def.Metadata[dgst] = mergeMetadata(def.Metadata[dgst], *opMeta)
 	}
-	s.Add(dgst, sls)
 	if _, ok := cache[dgst]; ok {
 		return def, nil
 	}
+	s.Add(dgst, sls)
 	def.Def = append(def.Def, dt)
 	cache[dgst] = struct{}{}
 	return def, nil
@@ -455,6 +455,7 @@ type ConstraintsOpt interface {
 	HTTPOption
 	ImageOption
 	GitOption
+	OCILayoutOption
 }

 type constraintsOptFunc func(m *Constraints)
@@ -471,6 +472,10 @@ func (fn constraintsOptFunc) SetLocalOption(li *LocalInfo) {
 	li.applyConstraints(fn)
 }

+func (fn constraintsOptFunc) SetOCILayoutOption(oi *OCILayoutInfo) {
+	oi.applyConstraints(fn)
+}
+
 func (fn constraintsOptFunc) SetHTTPOption(hi *HTTPInfo) {
 	hi.applyConstraints(fn)
 }
@@ -612,6 +617,7 @@ var (
 	LinuxArmel   = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"})
 	LinuxArm64   = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm64"})
 	LinuxS390x   = Platform(ocispecs.Platform{OS: "linux", Architecture: "s390x"})
+	LinuxPpc64   = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64"})
 	LinuxPpc64le = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64le"})
 	Darwin       = Platform(ocispecs.Platform{OS: "darwin", Architecture: "amd64"})
 	Windows      = Platform(ocispecs.Platform{OS: "windows", Architecture: "amd64"})
@@ -619,9 +625,7 @@ var (

 func Require(filters ...string) ConstraintsOpt {
 	return constraintsOptFunc(func(c *Constraints) {
-		for _, f := range filters {
-			c.WorkerConstraints = append(c.WorkerConstraints, f)
-		}
+		c.WorkerConstraints = append(c.WorkerConstraints, filters...)
 	})
 }
203 vendor/github.com/moby/buildkit/client/ociindex/ociindex.go generated vendored
@@ -2,8 +2,9 @@ package ociindex

 import (
 	"encoding/json"
-	"io/ioutil"
+	"io"
 	"os"
+	"path"

 	"github.com/gofrs/flock"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@@ -11,16 +12,132 @@ import (
 )

 const (
-	// IndexJSONLockFileSuffix is the suffix of the lock file
-	IndexJSONLockFileSuffix = ".lock"
+	// indexFile is the name of the index file
+	indexFile = "index.json"
+
+	// lockFileSuffix is the suffix of the lock file
+	lockFileSuffix = ".lock"
 )

-// PutDescToIndex puts desc to index with tag.
-// Existing manifests with the same tag will be removed from the index.
-func PutDescToIndex(index *ocispecs.Index, desc ocispecs.Descriptor, tag string) error {
-	if index == nil {
-		index = &ocispecs.Index{}
+type StoreIndex struct {
+	indexPath string
+	lockPath  string
+}
+
+func NewStoreIndex(storePath string) StoreIndex {
+	indexPath := path.Join(storePath, indexFile)
+	return StoreIndex{
+		indexPath: indexPath,
+		lockPath:  indexPath + lockFileSuffix,
+	}
+}
+
+func (s StoreIndex) Read() (*ocispecs.Index, error) {
+	lock := flock.New(s.lockPath)
+	locked, err := lock.TryRLock()
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not lock %s", s.lockPath)
+	}
+	if !locked {
+		return nil, errors.Errorf("could not lock %s", s.lockPath)
+	}
+	defer func() {
+		lock.Unlock()
+		os.RemoveAll(s.lockPath)
+	}()
+
+	b, err := os.ReadFile(s.indexPath)
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not read %s", s.indexPath)
+	}
+	var idx ocispecs.Index
+	if err := json.Unmarshal(b, &idx); err != nil {
+		return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b))
+	}
+	return &idx, nil
+}
+
+func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error {
+	lock := flock.New(s.lockPath)
+	locked, err := lock.TryLock()
+	if err != nil {
+		return errors.Wrapf(err, "could not lock %s", s.lockPath)
+	}
+	if !locked {
+		return errors.Errorf("could not lock %s", s.lockPath)
+	}
+	defer func() {
+		lock.Unlock()
+		os.RemoveAll(s.lockPath)
+	}()
+
+	f, err := os.OpenFile(s.indexPath, os.O_RDWR|os.O_CREATE, 0644)
+	if err != nil {
+		return errors.Wrapf(err, "could not open %s", s.indexPath)
+	}
+	defer f.Close()
+
+	var idx ocispecs.Index
+	b, err := io.ReadAll(f)
+	if err != nil {
+		return errors.Wrapf(err, "could not read %s", s.indexPath)
+	}
+	if len(b) > 0 {
+		if err := json.Unmarshal(b, &idx); err != nil {
+			return errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b))
+		}
+	}
+
+	if err = insertDesc(&idx, desc, tag); err != nil {
+		return err
+	}
+
+	b, err = json.Marshal(idx)
+	if err != nil {
+		return err
+	}
+	if _, err = f.WriteAt(b, 0); err != nil {
+		return err
+	}
+	if err = f.Truncate(int64(len(b))); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (s StoreIndex) Get(tag string) (*ocispecs.Descriptor, error) {
+	idx, err := s.Read()
+	if err != nil {
+		return nil, err
+	}
+
+	for _, m := range idx.Manifests {
+		if t, ok := m.Annotations[ocispecs.AnnotationRefName]; ok && t == tag {
+			return &m, nil
+		}
+	}
+	return nil, nil
+}
+
+func (s StoreIndex) GetSingle() (*ocispecs.Descriptor, error) {
+	idx, err := s.Read()
+	if err != nil {
+		return nil, err
+	}
+
+	if len(idx.Manifests) == 1 {
+		return &idx.Manifests[0], nil
+	}
+	return nil, nil
+}
+
+// insertDesc puts desc to index with tag.
+// Existing manifests with the same tag will be removed from the index.
+func insertDesc(index *ocispecs.Index, desc ocispecs.Descriptor, tag string) error {
+	if index == nil {
+		return nil
 	}

 	if index.SchemaVersion == 0 {
 		index.SchemaVersion = 2
 	}
@@ -41,73 +158,3 @@ func PutDescToIndex(index *ocispecs.Index, desc ocispecs.Descriptor, tag string)
 	index.Manifests = append(index.Manifests, desc)
 	return nil
 }
-
-func PutDescToIndexJSONFileLocked(indexJSONPath string, desc ocispecs.Descriptor, tag string) error {
-	lockPath := indexJSONPath + IndexJSONLockFileSuffix
-	lock := flock.New(lockPath)
-	locked, err := lock.TryLock()
-	if err != nil {
-		return errors.Wrapf(err, "could not lock %s", lockPath)
-	}
-	if !locked {
-		return errors.Errorf("could not lock %s", lockPath)
-	}
-	defer func() {
-		lock.Unlock()
-		os.RemoveAll(lockPath)
-	}()
-	f, err := os.OpenFile(indexJSONPath, os.O_RDWR|os.O_CREATE, 0644)
-	if err != nil {
-		return errors.Wrapf(err, "could not open %s", indexJSONPath)
-	}
-	defer f.Close()
-	var idx ocispecs.Index
-	b, err := ioutil.ReadAll(f)
-	if err != nil {
-		return errors.Wrapf(err, "could not read %s", indexJSONPath)
-	}
-	if len(b) > 0 {
-		if err := json.Unmarshal(b, &idx); err != nil {
-			return errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b))
-		}
-	}
-	if err = PutDescToIndex(&idx, desc, tag); err != nil {
-		return err
-	}
-	b, err = json.Marshal(idx)
-	if err != nil {
-		return err
-	}
-	if _, err = f.WriteAt(b, 0); err != nil {
-		return err
-	}
-	if err = f.Truncate(int64(len(b))); err != nil {
-		return err
-	}
-	return nil
-}
-
-func ReadIndexJSONFileLocked(indexJSONPath string) (*ocispecs.Index, error) {
-	lockPath := indexJSONPath + IndexJSONLockFileSuffix
-	lock := flock.New(lockPath)
-	locked, err := lock.TryRLock()
-	if err != nil {
-		return nil, errors.Wrapf(err, "could not lock %s", lockPath)
-	}
-	if !locked {
-		return nil, errors.Errorf("could not lock %s", lockPath)
-	}
-	defer func() {
-		lock.Unlock()
-		os.RemoveAll(lockPath)
-	}()
-	b, err := ioutil.ReadFile(indexJSONPath)
-	if err != nil {
-		return nil, errors.Wrapf(err, "could not read %s", indexJSONPath)
-	}
-	var idx ocispecs.Index
-	if err := json.Unmarshal(b, &idx); err != nil {
-		return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b))
-	}
-	return &idx, nil
-}
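The free functions PutDescToIndexJSONFileLocked/ReadIndexJSONFileLocked are folded into a StoreIndex handle rooted at an OCI layout directory. A sketch of the new Read/Put/Get surface; the path and digest are placeholders:

package main

import (
    "fmt"

    "github.com/moby/buildkit/client/ociindex"
    ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
    // StoreIndex manages <storePath>/index.json plus its flock lock file.
    idx := ociindex.NewStoreIndex("/tmp/oci-layout")

    desc := ocispecs.Descriptor{
        MediaType: ocispecs.MediaTypeImageManifest,
        Digest:    "sha256:deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
    }
    if err := idx.Put("latest", desc); err != nil {
        panic(err)
    }

    // Get returns nil, nil when the tag is absent.
    got, err := idx.Get("latest")
    if err != nil {
        panic(err)
    }
    if got != nil {
        fmt.Println(got.Digest)
    }
}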
2 vendor/github.com/moby/buildkit/client/prune.go generated vendored
@@ -23,7 +23,7 @@ func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOpti
 	if info.All {
 		req.All = true
 	}
-	cl, err := c.controlClient().Prune(ctx, req)
+	cl, err := c.ControlClient().Prune(ctx, req)
 	if err != nil {
 		return errors.Wrap(err, "failed to call prune")
 	}
295 vendor/github.com/moby/buildkit/client/solve.go generated vendored
@@ -2,6 +2,7 @@ package client

 import (
 	"context"
+	"encoding/base64"
 	"encoding/json"
 	"io"
 	"os"
@@ -14,16 +15,19 @@ import (
 	controlapi "github.com/moby/buildkit/api/services/control"
 	"github.com/moby/buildkit/client/llb"
 	"github.com/moby/buildkit/client/ociindex"
+	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
 	sessioncontent "github.com/moby/buildkit/session/content"
 	"github.com/moby/buildkit/session/filesync"
 	"github.com/moby/buildkit/session/grpchijack"
 	"github.com/moby/buildkit/solver/pb"
+	spb "github.com/moby/buildkit/sourcepolicy/pb"
 	"github.com/moby/buildkit/util/bklog"
 	"github.com/moby/buildkit/util/entitlements"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	"github.com/tonistiigi/fsutil"
 	fstypes "github.com/tonistiigi/fsutil/types"
 	"go.opentelemetry.io/otel/trace"
 	"golang.org/x/sync/errgroup"
@@ -32,6 +36,7 @@ import (
 type SolveOpt struct {
 	Exports   []ExportEntry
 	LocalDirs map[string]string
+	OCIStores map[string]content.Store
 	SharedKey string
 	Frontend  string
 	FrontendAttrs map[string]string
@@ -42,6 +47,9 @@ type SolveOpt struct {
 	AllowedEntitlements   []entitlements.Entitlement
 	SharedSession         *session.Session // TODO: refactor to better session syncing
 	SessionPreInitialized bool             // TODO: refactor to better session syncing
+	Internal              bool
+	SourcePolicy          *spb.Policy
+	Ref                   string
 }

 type ExportEntry struct {
@@ -88,6 +96,9 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 	}

 	ref := identity.NewID()
+	if opt.Ref != "" {
+		ref = opt.Ref
+	}
 	eg, ctx := errgroup.WithContext(ctx)

 	statusContext, cancelStatus := context.WithCancel(context.Background())
@@ -122,6 +133,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 		ex = opt.Exports[0]
 	}

+	storesToUpdate := []string{}
+
 	if !opt.SessionPreInitialized {
 		if len(syncedDirs) > 0 {
 			s.Allow(filesync.NewFSSyncProvider(syncedDirs))
@@ -131,50 +144,85 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 			s.Allow(a)
 		}

+		contentStores := map[string]content.Store{}
+		for key, store := range cacheOpt.contentStores {
+			contentStores[key] = store
+		}
+		for key, store := range opt.OCIStores {
+			key2 := "oci:" + key
+			if _, ok := contentStores[key2]; ok {
+				return nil, errors.Errorf("oci store key %q already exists", key)
+			}
+			contentStores[key2] = store
+		}
+
+		var supportFile bool
+		var supportDir bool
 		switch ex.Type {
 		case ExporterLocal:
-			if ex.Output != nil {
-				return nil, errors.New("output file writer is not supported by local exporter")
-			}
-			if ex.OutputDir == "" {
-				return nil, errors.New("output directory is required for local exporter")
-			}
-			s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
-		case ExporterOCI, ExporterDocker, ExporterTar:
-			if ex.OutputDir != "" {
-				return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
-			}
+			supportDir = true
+		case ExporterTar:
+			supportFile = true
+		case ExporterOCI, ExporterDocker:
+			supportDir = ex.OutputDir != ""
+			supportFile = ex.Output != nil
+		}
+
+		if supportFile && supportDir {
+			return nil, errors.Errorf("both file and directory output is not support by %s exporter", ex.Type)
+		}
+		if !supportFile && ex.Output != nil {
+			return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
+		}
+		if !supportDir && ex.OutputDir != "" {
+			return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
+		}
+
+		if supportFile {
 			if ex.Output == nil {
 				return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
 			}
 			s.Allow(filesync.NewFSSyncTarget(ex.Output))
-		default:
-			if ex.Output != nil {
-				return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
-			}
+		}
+		if supportDir {
 			if ex.OutputDir == "" {
 				return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
 			}
-			if ex.OutputDir != "" {
-				return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
+			switch ex.Type {
+			case ExporterOCI, ExporterDocker:
+				if err := os.MkdirAll(ex.OutputDir, 0755); err != nil {
+					return nil, err
+				}
+				cs, err := contentlocal.NewStore(ex.OutputDir)
+				if err != nil {
+					return nil, err
+				}
+				contentStores["export"] = cs
+				storesToUpdate = append(storesToUpdate, ex.OutputDir)
+			default:
+				s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
+			}
 		}

-		if len(cacheOpt.contentStores) > 0 {
-			s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores))
+		if len(contentStores) > 0 {
+			s.Allow(sessioncontent.NewAttachable(contentStores))
 		}

 		eg.Go(func() error {
 			sd := c.sessionDialer
 			if sd == nil {
-				sd = grpchijack.Dialer(c.controlClient())
+				sd = grpchijack.Dialer(c.ControlClient())
 			}
 			return s.Run(statusContext, sd)
 		})
 	}

+	frontendAttrs := map[string]string{}
+	for k, v := range opt.FrontendAttrs {
+		frontendAttrs[k] = v
+	}
 	for k, v := range cacheOpt.frontendAttrs {
-		if opt.FrontendAttrs == nil {
-			opt.FrontendAttrs = map[string]string{}
-		}
-		opt.FrontendAttrs[k] = v
+		frontendAttrs[k] = v
 	}

 	solveCtx, cancelSolve := context.WithCancel(ctx)
@@ -188,8 +236,10 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 			<-time.After(3 * time.Second)
 			cancelStatus()
 		}()
-		bklog.G(ctx).Debugf("stopping session")
-		s.Close()
+		if !opt.SessionPreInitialized {
+			bklog.G(ctx).Debugf("stopping session")
+			s.Close()
+		}
 	}()
 	var pbd *pb.Definition
 	if def != nil {
@@ -205,17 +255,19 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 			frontendInputs[key] = def.ToPB()
 		}

-		resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{
+		resp, err := c.ControlClient().Solve(ctx, &controlapi.SolveRequest{
 			Ref:            ref,
 			Definition:     pbd,
 			Exporter:       ex.Type,
 			ExporterAttrs:  ex.Attrs,
 			Session:        s.ID(),
 			Frontend:       opt.Frontend,
-			FrontendAttrs:  opt.FrontendAttrs,
+			FrontendAttrs:  frontendAttrs,
 			FrontendInputs: frontendInputs,
 			Cache:          cacheOpt.options,
 			Entitlements:   opt.AllowedEntitlements,
+			Internal:       opt.Internal,
+			SourcePolicy:   opt.SourcePolicy,
 		})
 		if err != nil {
 			return errors.Wrap(err, "failed to solve")
@@ -228,7 +280,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG

 	if runGateway != nil {
 		eg.Go(func() error {
-			err := runGateway(ref, s, opt.FrontendAttrs)
+			err := runGateway(ref, s, frontendAttrs)
 			if err == nil {
 				return nil
 			}
@@ -249,7 +301,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 	}

 	eg.Go(func() error {
-		stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{
+		stream, err := c.ControlClient().Status(statusContext, &controlapi.StatusRequest{
 			Ref: ref,
 		})
 		if err != nil {
@@ -263,52 +315,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 				}
 				return errors.Wrap(err, "failed to receive status")
 			}
-			s := SolveStatus{}
-			for _, v := range resp.Vertexes {
-				s.Vertexes = append(s.Vertexes, &Vertex{
-					Digest:        v.Digest,
-					Inputs:        v.Inputs,
-					Name:          v.Name,
-					Started:       v.Started,
-					Completed:     v.Completed,
-					Error:         v.Error,
-					Cached:        v.Cached,
-					ProgressGroup: v.ProgressGroup,
-				})
-			}
-			for _, v := range resp.Statuses {
-				s.Statuses = append(s.Statuses, &VertexStatus{
-					ID:        v.ID,
-					Vertex:    v.Vertex,
-					Name:      v.Name,
-					Total:     v.Total,
-					Current:   v.Current,
-					Timestamp: v.Timestamp,
-					Started:   v.Started,
-					Completed: v.Completed,
-				})
-			}
-			for _, v := range resp.Logs {
-				s.Logs = append(s.Logs, &VertexLog{
-					Vertex:    v.Vertex,
-					Stream:    int(v.Stream),
-					Data:      v.Msg,
-					Timestamp: v.Timestamp,
-				})
-			}
-			for _, v := range resp.Warnings {
-				s.Warnings = append(s.Warnings, &VertexWarning{
-					Vertex:     v.Vertex,
-					Level:      int(v.Level),
-					Short:      v.Short,
-					Detail:     v.Detail,
-					URL:        v.Url,
-					SourceInfo: v.Info,
-					Range:      v.Ranges,
-				})
-			}
 			if statusChan != nil {
-				statusChan <- &s
+				statusChan <- NewSolveStatus(resp)
 			}
 		}
 	})
@@ -323,8 +331,29 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 		if err = json.Unmarshal([]byte(manifestDescJSON), &manifestDesc); err != nil {
 			return nil, err
 		}
-		for indexJSONPath, tag := range cacheOpt.indicesToUpdate {
-			if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil {
+		for storePath, tag := range cacheOpt.storesToUpdate {
+			idx := ociindex.NewStoreIndex(storePath)
+			if err := idx.Put(tag, manifestDesc); err != nil {
 				return nil, err
 			}
 		}
 	}
+	if manifestDescDt := res.ExporterResponse[exptypes.ExporterImageDescriptorKey]; manifestDescDt != "" {
+		manifestDescDt, err := base64.StdEncoding.DecodeString(manifestDescDt)
+		if err != nil {
+			return nil, err
+		}
+		var manifestDesc ocispecs.Descriptor
+		if err = json.Unmarshal([]byte(manifestDescDt), &manifestDesc); err != nil {
+			return nil, err
+		}
+		for _, storePath := range storesToUpdate {
+			tag := "latest"
+			if t, ok := res.ExporterResponse["image.name"]; ok {
+				tag = t
+			}
+			idx := ociindex.NewStoreIndex(storePath)
+			if err := idx.Put(tag, manifestDesc); err != nil {
+				return nil, err
+			}
+		}
@@ -332,7 +361,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
 	return res, nil
 }

-func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
+func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesync.StaticDirSource, error) {
 	for _, d := range localDirs {
 		fi, err := os.Stat(d)
 		if err != nil {
@@ -342,16 +371,16 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
 			return nil, errors.Errorf("%s not a directory", d)
 		}
 	}
-	resetUIDAndGID := func(p string, st *fstypes.Stat) bool {
+	resetUIDAndGID := func(p string, st *fstypes.Stat) fsutil.MapResult {
 		st.Uid = 0
 		st.Gid = 0
-		return true
+		return fsutil.MapResultKeep
 	}

-	dirs := make([]filesync.SyncedDir, 0, len(localDirs))
+	dirs := make(filesync.StaticDirSource, len(localDirs))
 	if def == nil {
 		for name, d := range localDirs {
-			dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
+			dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
 		}
 	} else {
 		for _, dt := range def.Def {
@@ -366,7 +395,7 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
 			if !ok {
 				return nil, errors.Errorf("local directory %s not enabled", name)
 			}
-			dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
+			dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
 		}
 	}
 }
@@ -383,24 +412,20 @@ func defaultSessionName() string {
 }

 type cacheOptions struct {
-	options         controlapi.CacheOptions
-	contentStores   map[string]content.Store // key: ID of content store ("local:" + csDir)
-	indicesToUpdate map[string]string        // key: index.JSON file name, value: tag
-	frontendAttrs   map[string]string
+	options        controlapi.CacheOptions
+	contentStores  map[string]content.Store // key: ID of content store ("local:" + csDir)
+	storesToUpdate map[string]string        // key: path to content store, value: tag
+	frontendAttrs  map[string]string
 }

 func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cacheOptions, error) {
 	var (
 		cacheExports []*controlapi.CacheOptionsEntry
 		cacheImports []*controlapi.CacheOptionsEntry
-		// legacy API is used for registry caches, because the daemon might not support the new API
-		legacyExportRef  string
-		legacyImportRefs []string
 	)
 	contentStores := make(map[string]content.Store)
-	indicesToUpdate := make(map[string]string) // key: index.JSON file name, value: tag
+	storesToUpdate := make(map[string]string)
 	frontendAttrs := make(map[string]string)
-	legacyExportAttrs := make(map[string]string)
 	for _, ex := range opt.CacheExports {
 		if ex.Type == "local" {
 			csDir := ex.Attrs["dest"]
@@ -415,26 +440,26 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
 				return nil, err
 			}
 			contentStores["local:"+csDir] = cs
-			// TODO(AkihiroSuda): support custom index JSON path and tag
-			indexJSONPath := filepath.Join(csDir, "index.json")
-			indicesToUpdate[indexJSONPath] = "latest"
-		}
-		if ex.Type == "registry" && legacyExportRef == "" {
-			legacyExportRef = ex.Attrs["ref"]
-			for k, v := range ex.Attrs {
-				if k != "ref" {
-					legacyExportAttrs[k] = v
-				}
+
+			tag := "latest"
+			if t, ok := ex.Attrs["tag"]; ok {
+				tag = t
 			}
-		} else {
-			cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
-				Type:  ex.Type,
-				Attrs: ex.Attrs,
-			})
+			// TODO(AkihiroSuda): support custom index JSON path and tag
+			storesToUpdate[csDir] = tag
+		}
+		if ex.Type == "registry" {
+			regRef := ex.Attrs["ref"]
+			if regRef == "" {
+				return nil, errors.New("registry cache exporter requires ref")
+			}
 		}
+		cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
+			Type:  ex.Type,
+			Attrs: ex.Attrs,
+		})
 	}
 	for _, im := range opt.CacheImports {
-		attrs := im.Attrs
 		if im.Type == "local" {
 			csDir := im.Attrs["src"]
 			if csDir == "" {
@@ -445,41 +470,40 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
 				bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error())
 				continue
 			}
-			// if digest is not specified, load from "latest" tag
-			if attrs["digest"] == "" {
-				idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json"))
+			// if digest is not specified, attempt to load from tag
+			if im.Attrs["digest"] == "" {
+				tag := "latest"
+				if t, ok := im.Attrs["tag"]; ok {
+					tag = t
+				}
+
+				idx := ociindex.NewStoreIndex(csDir)
+				desc, err := idx.Get(tag)
 				if err != nil {
 					bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error())
 					continue
 				}
-				for _, m := range idx.Manifests {
-					if (m.Annotations[ocispecs.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispecs.AnnotationRefName] == attrs["tag"]) {
-						attrs["digest"] = string(m.Digest)
-						break
-					}
-				}
-				if attrs["digest"] == "" {
-					return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json")
+				if desc != nil {
+					im.Attrs["digest"] = desc.Digest.String()
 				}
 			}
+			if im.Attrs["digest"] == "" {
+				return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json")
+			}
 			contentStores["local:"+csDir] = cs
 		}
 		if im.Type == "registry" {
-			legacyImportRef := attrs["ref"]
-			legacyImportRefs = append(legacyImportRefs, legacyImportRef)
-		} else {
-			cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
-				Type:  im.Type,
-				Attrs: attrs,
-			})
+			regRef := im.Attrs["ref"]
+			if regRef == "" {
+				return nil, errors.New("registry cache importer requires ref")
+			}
 		}
+		cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
+			Type:  im.Type,
+			Attrs: im.Attrs,
+		})
 	}
 	if opt.Frontend != "" || isGateway {
-		// use legacy API for registry importers, because the frontend might not support the new API
-		if len(legacyImportRefs) > 0 {
-			frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",")
-		}
-		// use new API for other importers
 		if len(cacheImports) > 0 {
 			s, err := json.Marshal(cacheImports)
 			if err != nil {
@@ -490,17 +514,12 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
 	}
 	res := cacheOptions{
 		options: controlapi.CacheOptions{
-			// old API (for registry caches, planned to be removed in early 2019)
-			ExportRefDeprecated:   legacyExportRef,
-			ExportAttrsDeprecated: legacyExportAttrs,
-			ImportRefsDeprecated:  legacyImportRefs,
-			// new API
 			Exports: cacheExports,
 			Imports: cacheImports,
 		},
-		contentStores:   contentStores,
-		indicesToUpdate: indicesToUpdate,
-		frontendAttrs:   frontendAttrs,
+		contentStores:  contentStores,
+		storesToUpdate: storesToUpdate,
+		frontendAttrs:  frontendAttrs,
 	}
 	return &res, nil
 }
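SolveOpt grows OCIStores, Internal, SourcePolicy, and Ref, and the OCI/Docker exporters can now export straight into a directory-backed content store whose index.json is tagged after the solve. A hedged end-to-end sketch; the address, paths, and build ref are examples:

package main

import (
    "context"

    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/client/llb"
)

func main() {
    ctx := context.Background()
    c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock") // example address
    if err != nil {
        panic(err)
    }
    defer c.Close()

    def, err := llb.Scratch().Marshal(ctx)
    if err != nil {
        panic(err)
    }

    _, err = c.Solve(ctx, def, client.SolveOpt{
        Ref: "my-build-ref", // optional caller-chosen build ref (new field)
        Exports: []client.ExportEntry{{
            Type:      client.ExporterOCI,
            OutputDir: "/tmp/oci-out", // directory-backed content store output
        }},
    }, nil)
    if err != nil {
        panic(err)
    }
}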
125 vendor/github.com/moby/buildkit/client/status.go generated vendored Normal file
@@ -0,0 +1,125 @@
+package client
+
+import (
+	controlapi "github.com/moby/buildkit/api/services/control"
+)
+
+var emptyLogVertexSize int
+
+func init() {
+	emptyLogVertex := controlapi.VertexLog{}
+	emptyLogVertexSize = emptyLogVertex.Size()
+}
+
+func NewSolveStatus(resp *controlapi.StatusResponse) *SolveStatus {
+	s := &SolveStatus{}
+	for _, v := range resp.Vertexes {
+		s.Vertexes = append(s.Vertexes, &Vertex{
+			Digest:        v.Digest,
+			Inputs:        v.Inputs,
+			Name:          v.Name,
+			Started:       v.Started,
+			Completed:     v.Completed,
+			Error:         v.Error,
+			Cached:        v.Cached,
+			ProgressGroup: v.ProgressGroup,
+		})
+	}
+	for _, v := range resp.Statuses {
+		s.Statuses = append(s.Statuses, &VertexStatus{
+			ID:        v.ID,
+			Vertex:    v.Vertex,
+			Name:      v.Name,
+			Total:     v.Total,
+			Current:   v.Current,
+			Timestamp: v.Timestamp,
+			Started:   v.Started,
+			Completed: v.Completed,
+		})
+	}
+	for _, v := range resp.Logs {
+		s.Logs = append(s.Logs, &VertexLog{
+			Vertex:    v.Vertex,
+			Stream:    int(v.Stream),
+			Data:      v.Msg,
+			Timestamp: v.Timestamp,
+		})
+	}
+	for _, v := range resp.Warnings {
+		s.Warnings = append(s.Warnings, &VertexWarning{
+			Vertex:     v.Vertex,
+			Level:      int(v.Level),
+			Short:      v.Short,
+			Detail:     v.Detail,
+			URL:        v.Url,
+			SourceInfo: v.Info,
+			Range:      v.Ranges,
+		})
+	}
+	return s
+}
+
+func (ss *SolveStatus) Marshal() (out []*controlapi.StatusResponse) {
+	logSize := 0
+	for {
+		retry := false
+		sr := controlapi.StatusResponse{}
+		for _, v := range ss.Vertexes {
+			sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{
+				Digest:        v.Digest,
+				Inputs:        v.Inputs,
+				Name:          v.Name,
+				Started:       v.Started,
+				Completed:     v.Completed,
+				Error:         v.Error,
+				Cached:        v.Cached,
+				ProgressGroup: v.ProgressGroup,
+			})
+		}
+		for _, v := range ss.Statuses {
+			sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{
+				ID:        v.ID,
+				Vertex:    v.Vertex,
+				Name:      v.Name,
+				Current:   v.Current,
+				Total:     v.Total,
+				Timestamp: v.Timestamp,
+				Started:   v.Started,
+				Completed: v.Completed,
+			})
+		}
+		for i, v := range ss.Logs {
+			sr.Logs = append(sr.Logs, &controlapi.VertexLog{
+				Vertex:    v.Vertex,
+				Stream:    int64(v.Stream),
+				Msg:       v.Data,
+				Timestamp: v.Timestamp,
+			})
+			logSize += len(v.Data) + emptyLogVertexSize
+			// avoid logs growing big and split apart if they do
+			if logSize > 1024*1024 {
+				ss.Vertexes = nil
+				ss.Statuses = nil
+				ss.Logs = ss.Logs[i+1:]
+				retry = true
+				break
+			}
+		}
+		for _, v := range ss.Warnings {
+			sr.Warnings = append(sr.Warnings, &controlapi.VertexWarning{
+				Vertex: v.Vertex,
+				Level:  int64(v.Level),
+				Short:  v.Short,
+				Detail: v.Detail,
+				Info:   v.SourceInfo,
+				Ranges: v.Range,
+				Url:    v.URL,
+			})
+		}
+		out = append(out, &sr)
+		if !retry {
+			break
+		}
+	}
+	return
+}
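status.go centralizes the SolveStatus conversion that solve.go used to inline: NewSolveStatus decodes one StatusResponse, while Marshal splits a SolveStatus into responses, chunking once the accumulated log payload passes ~1 MiB. A small round-trip sketch:

package main

import (
    "fmt"

    "github.com/moby/buildkit/client"
)

func main() {
    ss := &client.SolveStatus{
        Logs: []*client.VertexLog{{Data: []byte("hello from a build step")}},
    }
    // Marshal and then rebuild each chunk with NewSolveStatus.
    for _, resp := range ss.Marshal() {
        rebuilt := client.NewSolveStatus(resp)
        fmt.Println(len(rebuilt.Logs), "log record(s)")
    }
}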
20 vendor/github.com/moby/buildkit/client/workers.go generated vendored
@@ -13,10 +13,11 @@ import (

// WorkerInfo contains information about a worker
type WorkerInfo struct {
	ID        string              `json:"id"`
	Labels    map[string]string   `json:"labels"`
	Platforms []ocispecs.Platform `json:"platforms"`
	GCPolicy  []PruneInfo         `json:"gcPolicy"`
	ID              string              `json:"id"`
	Labels          map[string]string   `json:"labels"`
	Platforms       []ocispecs.Platform `json:"platforms"`
	GCPolicy        []PruneInfo         `json:"gcPolicy"`
	BuildkitVersion BuildkitVersion     `json:"buildkitVersion"`
}

// ListWorkers lists all active workers

@@ -27,7 +28,7 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]
}

	req := &controlapi.ListWorkersRequest{Filter: info.Filter}
	resp, err := c.controlClient().ListWorkers(ctx, req)
	resp, err := c.ControlClient().ListWorkers(ctx, req)
	if err != nil {
		return nil, errors.Wrap(err, "failed to list workers")
	}

@@ -36,10 +37,11 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]

	for _, w := range resp.Record {
		wi = append(wi, &WorkerInfo{
			ID:        w.ID,
			Labels:    w.Labels,
			Platforms: pb.ToSpecPlatforms(w.Platforms),
			GCPolicy:  fromAPIGCPolicy(w.GCPolicy),
			ID:              w.ID,
			Labels:          w.Labels,
			Platforms:       pb.ToSpecPlatforms(w.Platforms),
			GCPolicy:        fromAPIGCPolicy(w.GCPolicy),
			BuildkitVersion: fromAPIBuildkitVersion(w.BuildkitVersion),
		})
	}
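The BuildkitVersion field added to WorkerInfo is populated by ListWorkers. A minimal sketch of reading it, assuming a reachable buildkitd endpoint (the socket path below is a placeholder, not part of this diff):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	workers, err := c.ListWorkers(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range workers {
		// BuildkitVersion reports what each worker is running.
		fmt.Printf("%s: %s %s (%s)\n", w.ID,
			w.BuildkitVersion.Package, w.BuildkitVersion.Version, w.BuildkitVersion.Revision)
	}
}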
132 vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go generated vendored Normal file
@@ -0,0 +1,132 @@
package config

import (
	resolverconfig "github.com/moby/buildkit/util/resolver/config"
)

// Config provides containerd configuration data for the server
type Config struct {
	Debug bool `toml:"debug"`

	// Root is the path to a directory where buildkit will store persistent data
	Root string `toml:"root"`

	// Entitlements e.g. security.insecure, network.host
	Entitlements []string `toml:"insecure-entitlements"`
	// GRPC configuration settings
	GRPC GRPCConfig `toml:"grpc"`

	Workers struct {
		OCI        OCIConfig        `toml:"oci"`
		Containerd ContainerdConfig `toml:"containerd"`
	} `toml:"worker"`

	Registries map[string]resolverconfig.RegistryConfig `toml:"registry"`

	DNS *DNSConfig `toml:"dns"`

	History *HistoryConfig `toml:"history"`
}

type GRPCConfig struct {
	Address      []string `toml:"address"`
	DebugAddress string   `toml:"debugAddress"`
	UID          *int     `toml:"uid"`
	GID          *int     `toml:"gid"`

	TLS TLSConfig `toml:"tls"`
	// MaxRecvMsgSize int `toml:"max_recv_message_size"`
	// MaxSendMsgSize int `toml:"max_send_message_size"`
}

type TLSConfig struct {
	Cert string `toml:"cert"`
	Key  string `toml:"key"`
	CA   string `toml:"ca"`
}

type GCConfig struct {
	GC            *bool      `toml:"gc"`
	GCKeepStorage int64      `toml:"gckeepstorage"`
	GCPolicy      []GCPolicy `toml:"gcpolicy"`
}

type NetworkConfig struct {
	Mode          string `toml:"networkMode"`
	CNIConfigPath string `toml:"cniConfigPath"`
	CNIBinaryPath string `toml:"cniBinaryPath"`
	CNIPoolSize   int    `toml:"cniPoolSize"`
}

type OCIConfig struct {
	Enabled          *bool             `toml:"enabled"`
	Labels           map[string]string `toml:"labels"`
	Platforms        []string          `toml:"platforms"`
	Snapshotter      string            `toml:"snapshotter"`
	Rootless         bool              `toml:"rootless"`
	NoProcessSandbox bool              `toml:"noProcessSandbox"`
	GCConfig
	NetworkConfig
	// UserRemapUnsupported is unsupported key for testing. The feature is
	// incomplete and the intention is to make it default without config.
	UserRemapUnsupported string `toml:"userRemapUnsupported"`
	// For use in storing the OCI worker binary name that will replace buildkit-runc
	Binary               string `toml:"binary"`
	ProxySnapshotterPath string `toml:"proxySnapshotterPath"`
	DefaultCgroupParent  string `toml:"defaultCgroupParent"`

	// StargzSnapshotterConfig is configuration for stargz snapshotter.
	// We use a generic map[string]interface{} in order to remove the dependency
	// on stargz snapshotter's config pkg from our config.
	StargzSnapshotterConfig map[string]interface{} `toml:"stargzSnapshotter"`

	// ApparmorProfile is the name of the apparmor profile that should be used to constrain build containers.
	// The profile should already be loaded (by a higher level system) before creating a worker.
	ApparmorProfile string `toml:"apparmor-profile"`

	// SELinux enables applying SELinux labels.
	SELinux bool `toml:"selinux"`

	// MaxParallelism is the maximum number of parallel build steps that can be run at the same time.
	MaxParallelism int `toml:"max-parallelism"`
}

type ContainerdConfig struct {
	Address   string            `toml:"address"`
	Enabled   *bool             `toml:"enabled"`
	Labels    map[string]string `toml:"labels"`
	Platforms []string          `toml:"platforms"`
	Namespace string            `toml:"namespace"`
	GCConfig
	NetworkConfig
	Snapshotter string `toml:"snapshotter"`

	// ApparmorProfile is the name of the apparmor profile that should be used to constrain build containers.
	// The profile should already be loaded (by a higher level system) before creating a worker.
	ApparmorProfile string `toml:"apparmor-profile"`

	// SELinux enables applying SELinux labels.
	SELinux bool `toml:"selinux"`

	MaxParallelism int `toml:"max-parallelism"`

	Rootless bool `toml:"rootless"`
}

type GCPolicy struct {
	All          bool     `toml:"all"`
	KeepBytes    int64    `toml:"keepBytes"`
	KeepDuration int64    `toml:"keepDuration"`
	Filters      []string `toml:"filters"`
}

type DNSConfig struct {
	Nameservers   []string `toml:"nameservers"`
	Options       []string `toml:"options"`
	SearchDomains []string `toml:"searchDomains"`
}

type HistoryConfig struct {
	MaxAge     int64 `toml:"maxAge"`
	MaxEntries int64 `toml:"maxEntries"`
}
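The new History field holds two plain int64 limits on retained build-history records (MaxAge is presumably a duration in seconds). A minimal sketch of building a Config programmatically; all values here are illustrative:

package main

import (
	"fmt"

	bkconfig "github.com/moby/buildkit/cmd/buildkitd/config"
)

func main() {
	enabled := true
	cfg := bkconfig.Config{Root: "/var/lib/buildkit"}
	// Pointer fields (*bool, *int) let callers distinguish "unset" from the zero value.
	cfg.Workers.OCI.Enabled = &enabled
	cfg.History = &bkconfig.HistoryConfig{
		MaxAge:     48 * 3600, // illustrative: keep history entries for 48h
		MaxEntries: 50,        // illustrative cap on record count
	}
	fmt.Printf("%+v\n", cfg.History)
}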
31 vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy.go generated vendored Normal file
@@ -0,0 +1,31 @@
package config

const defaultCap int64 = 2e9 // 2GB

func DefaultGCPolicy(p string, keep int64) []GCPolicy {
	if keep == 0 {
		keep = DetectDefaultGCCap(p)
	}
	return []GCPolicy{
		// if build cache uses more than 512MB delete the most easily reproducible data after it has not been used for 2 days
		{
			Filters:      []string{"type==source.local,type==exec.cachemount,type==source.git.checkout"},
			KeepDuration: 48 * 3600, // 48h
			KeepBytes:    512 * 1e6, // 512MB
		},
		// remove any data not used for 60 days
		{
			KeepDuration: 60 * 24 * 3600, // 60d
			KeepBytes:    keep,
		},
		// keep the unshared build cache under cap
		{
			KeepBytes: keep,
		},
		// if previous policies were insufficient start deleting internal data to keep build cache under cap
		{
			All:       true,
			KeepBytes: keep,
		},
	}
}
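For a concrete view of the four tiers, a sketch that prints the policies with an explicit cap, which skips DetectDefaultGCCap entirely; the path and cap are arbitrary:

package main

import (
	"fmt"

	bkconfig "github.com/moby/buildkit/cmd/buildkitd/config"
)

func main() {
	// A non-zero keep value bypasses filesystem-size detection.
	for i, p := range bkconfig.DefaultGCPolicy("/var/lib/buildkit", 20e9) {
		fmt.Printf("tier %d: all=%v keepBytes=%d keepDuration=%ds filters=%v\n",
			i, p.All, p.KeepBytes, p.KeepDuration, p.Filters)
	}
}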
18 vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_unix.go generated vendored Normal file
@@ -0,0 +1,18 @@
//go:build !windows
// +build !windows

package config

import (
	"syscall"
)

func DetectDefaultGCCap(root string) int64 {
	var st syscall.Statfs_t
	if err := syscall.Statfs(root, &st); err != nil {
		return defaultCap
	}
	diskSize := int64(st.Bsize) * int64(st.Blocks)
	avail := diskSize / 10
	return (avail/(1<<30) + 1) * 1e9 // round up
}
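The return expression divides by binary GiB but scales back up in decimal GB, so the cap rounds up to the next whole unit. A worked example of the same arithmetic for an assumed 500GB filesystem:

package main

import "fmt"

func main() {
	// Same arithmetic as DetectDefaultGCCap, for an assumed 500GB filesystem.
	diskSize := int64(500e9)
	avail := diskSize / 10             // 50,000,000,000 bytes (10% of the disk)
	quota := (avail/(1<<30) + 1) * 1e9 // 50e9 / 2^30 = 46; +1 rounds up; scaled to 47e9
	fmt.Println(quota)                 // 47000000000, i.e. a ~47GB default cap
}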
8 vendor/github.com/moby/buildkit/cmd/buildkitd/config/gcpolicy_windows.go generated vendored Normal file
@@ -0,0 +1,8 @@
//go:build windows
// +build windows

package config

func DetectDefaultGCCap(root string) int64 {
	return defaultCap
}
36 vendor/github.com/moby/buildkit/cmd/buildkitd/config/load.go generated vendored Normal file
@@ -0,0 +1,36 @@
package config

import (
	"io"
	"os"

	"github.com/pelletier/go-toml"
	"github.com/pkg/errors"
)

// Load loads buildkitd config
func Load(r io.Reader) (Config, error) {
	var c Config
	t, err := toml.LoadReader(r)
	if err != nil {
		return c, errors.Wrap(err, "failed to parse config")
	}
	err = t.Unmarshal(&c)
	if err != nil {
		return c, errors.Wrap(err, "failed to parse config")
	}
	return c, nil
}

// LoadFile loads buildkitd config file
func LoadFile(fp string) (Config, error) {
	f, err := os.Open(fp)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return Config{}, nil
		}
		return Config{}, errors.Wrapf(err, "failed to load config from %s", fp)
	}
	defer f.Close()
	return Load(f)
}
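As a sketch of Load against the schema defined earlier in this diff, the fragment below parses an in-memory TOML document; every key comes from the struct tags above, but the values are illustrative:

package main

import (
	"fmt"
	"log"
	"strings"

	bkconfig "github.com/moby/buildkit/cmd/buildkitd/config"
)

func main() {
	// Hypothetical buildkitd.toml fragment exercising a few of the tagged fields.
	fragment := `
root = "/var/lib/buildkit"

[history]
maxAge = 172800
maxEntries = 50

[worker.oci]
enabled = true
max-parallelism = 4
`
	cfg, err := bkconfig.Load(strings.NewReader(fragment))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Root, cfg.History.MaxEntries, cfg.Workers.OCI.MaxParallelism)
}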
Some files were not shown because too many files have changed in this diff.