diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go index 65ee7a329c9202514461fab252c04bcf94de887e..733a337e87d179f0620453269b4e0ecc2fadbd45 100644 --- a/builder/builder-next/adapters/containerimage/pull.go +++ b/builder/builder-next/adapters/containerimage/pull.go @@ -9,7 +9,6 @@ import ( "path" "runtime" "sync" - "sync/atomic" "time" "github.com/containerd/containerd/content" @@ -31,6 +30,7 @@ import ( "github.com/moby/buildkit/cache" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/imageutil" @@ -59,18 +59,12 @@ type SourceOpt struct { // Source is the source implementation for accessing container images type Source struct { SourceOpt - g flightcontrol.Group - resolverCache *resolverCache + g flightcontrol.Group } // NewSource creates a new image source func NewSource(opt SourceOpt) (*Source, error) { - is := &Source{ - SourceOpt: opt, - resolverCache: newResolverCache(), - } - - return is, nil + return &Source{SourceOpt: opt}, nil } // ID returns image scheme identifier @@ -78,16 +72,6 @@ func (is *Source) ID() string { return source.DockerImageScheme } -func (is *Source) getResolver(hosts docker.RegistryHosts, ref string, sm *session.Manager, g session.Group) remotes.Resolver { - if res := is.resolverCache.Get(ref, g); res != nil { - return res - } - auth := resolver.NewSessionAuthenticator(sm, g) - r := resolver.New(hosts, auth) - r = is.resolverCache.Add(ref, auth, r, g) - return r -} - func (is *Source) resolveLocal(refStr string) (*image.Image, error) { ref, err := distreference.ParseNormalizedNamed(refStr) if err != nil { @@ -109,8 +93,15 @@ func (is *Source) resolveRemote(ctx context.Context, ref string, platform *ocisp dgst digest.Digest dt []byte } - res, err := is.g.Do(ctx, ref, func(ctx context.Context) (interface{}, error) { - dgst, dt, err := imageutil.Config(ctx, ref, is.getResolver(is.RegistryHosts, ref, sm, g), is.ContentStore, nil, platform) + p := platforms.DefaultSpec() + if platform != nil { + p = *platform + } + // key is used to synchronize resolutions that can happen in parallel when doing multi-stage. 
+ key := "getconfig::" + ref + "::" + platforms.Format(p) + res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) { + res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g) + dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, nil, platform) if err != nil { return nil, err } @@ -168,7 +159,7 @@ func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.Re } // Resolve returns access to pulling for an identifier -func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) { +func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) { imageIdentifier, ok := id.(*source.ImageIdentifier) if !ok { return nil, errors.Errorf("invalid image identifier %v", id) @@ -191,29 +182,20 @@ func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session type puller struct { is *Source - resolveOnce sync.Once resolveLocalOnce sync.Once src *source.ImageIdentifier desc ocispec.Descriptor ref string - resolveErr error - resolverInstance remotes.Resolver - resolverOnce sync.Once config []byte platform ocispec.Platform sm *session.Manager } func (p *puller) resolver(g session.Group) remotes.Resolver { - p.resolverOnce.Do(func() { - if p.resolverInstance == nil { - p.resolverInstance = p.is.getResolver(p.is.RegistryHosts, p.src.Reference.String(), p.sm, g) - } - }) - return p.resolverInstance + return resolver.DefaultPool.GetResolver(p.is.RegistryHosts, p.src.Reference.String(), "pull", p.sm, g) } -func (p *puller) mainManifestKey(dgst digest.Digest, platform ocispec.Platform) (digest.Digest, error) { +func (p *puller) mainManifestKey(platform ocispec.Platform) (digest.Digest, error) { dt, err := json.Marshal(struct { Digest digest.Digest OS string @@ -271,22 +253,23 @@ func (p *puller) resolveLocal() { } func (p *puller) resolve(ctx context.Context, g session.Group) error { - p.resolveOnce.Do(func() { + // key is used to synchronize resolutions that can happen in parallel when doing multi-stage. 
+ key := "resolve::" + p.ref + "::" + platforms.Format(p.platform) + _, err := p.is.g.Do(ctx, key, func(ctx context.Context) (_ interface{}, err error) { resolveProgressDone := oneOffProgress(ctx, "resolve "+p.src.Reference.String()) + defer func() { + resolveProgressDone(err) + }() ref, err := distreference.ParseNormalizedNamed(p.src.Reference.String()) if err != nil { - p.resolveErr = err - _ = resolveProgressDone(err) - return + return nil, err } if p.desc.Digest == "" && p.config == nil { origRef, desc, err := p.resolver(g).Resolve(ctx, ref.String()) if err != nil { - p.resolveErr = err - _ = resolveProgressDone(err) - return + return nil, err } p.desc = desc @@ -301,65 +284,61 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error { if p.config == nil && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest { ref, err := distreference.WithDigest(ref, p.desc.Digest) if err != nil { - p.resolveErr = err - _ = resolveProgressDone(err) - return + return nil, err } _, dt, err := p.is.ResolveImageConfig(ctx, ref.String(), llb.ResolveImageConfigOpt{Platform: &p.platform, ResolveMode: resolveModeToString(p.src.ResolveMode)}, p.sm, g) if err != nil { - p.resolveErr = err - _ = resolveProgressDone(err) - return + return nil, err } p.config = dt } - _ = resolveProgressDone(nil) + return nil, nil }) - return p.resolveErr + return err } -func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, bool, error) { +func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) { p.resolveLocal() if p.desc.Digest != "" && index == 0 { - dgst, err := p.mainManifestKey(p.desc.Digest, p.platform) + dgst, err := p.mainManifestKey(p.platform) if err != nil { - return "", false, err + return "", nil, false, err } - return dgst.String(), false, nil + return dgst.String(), nil, false, nil } if p.config != nil { k := cacheKeyFromConfig(p.config).String() if k == "" { - return digest.FromBytes(p.config).String(), true, nil + return digest.FromBytes(p.config).String(), nil, true, nil } - return k, true, nil + return k, nil, true, nil } if err := p.resolve(ctx, g); err != nil { - return "", false, err + return "", nil, false, err } if p.desc.Digest != "" && index == 0 { - dgst, err := p.mainManifestKey(p.desc.Digest, p.platform) + dgst, err := p.mainManifestKey(p.platform) if err != nil { - return "", false, err + return "", nil, false, err } - return dgst.String(), false, nil + return dgst.String(), nil, false, nil } k := cacheKeyFromConfig(p.config).String() if k == "" { - dgst, err := p.mainManifestKey(p.desc.Digest, p.platform) + dgst, err := p.mainManifestKey(p.platform) if err != nil { - return "", false, err + return "", nil, false, err } - return dgst.String(), true, nil + return dgst.String(), nil, true, nil } - return k, true, nil + return k, nil, true, nil } func (p *puller) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) { @@ -426,10 +405,6 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable } platform := platforms.Only(p.platform) - // workaround for GCR bug that requires a request to manifest endpoint for authentication to work. - // if current resolver has not used manifests do a dummy request. - // in most cases resolver should be cached and extra request is not needed. 
- ensureManifestRequested(ctx, p.resolver(g), p.ref) var ( schema1Converter *schema1.Converter @@ -845,97 +820,6 @@ func resolveModeToString(rm source.ResolveMode) string { return "" } -type resolverCache struct { - mu sync.Mutex - m map[string]cachedResolver -} - -type cachedResolver struct { - counter int64 // needs to be 64bit aligned for 32bit systems - timeout time.Time - remotes.Resolver - auth *resolver.SessionAuthenticator -} - -func (cr *cachedResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) { - atomic.AddInt64(&cr.counter, 1) - return cr.Resolver.Resolve(ctx, ref) -} - -func (r *resolverCache) Add(ref string, auth *resolver.SessionAuthenticator, resolver remotes.Resolver, g session.Group) *cachedResolver { - r.mu.Lock() - defer r.mu.Unlock() - - ref = r.repo(ref) - - cr, ok := r.m[ref] - cr.timeout = time.Now().Add(time.Minute) - if ok { - cr.auth.AddSession(g) - return &cr - } - - cr.Resolver = resolver - cr.auth = auth - r.m[ref] = cr - return &cr -} - -func (r *resolverCache) repo(refStr string) string { - ref, err := distreference.ParseNormalizedNamed(refStr) - if err != nil { - return refStr - } - return ref.Name() -} - -func (r *resolverCache) Get(ref string, g session.Group) *cachedResolver { - r.mu.Lock() - defer r.mu.Unlock() - - ref = r.repo(ref) - - cr, ok := r.m[ref] - if ok { - cr.auth.AddSession(g) - return &cr - } - return nil -} - -func (r *resolverCache) clean(now time.Time) { - r.mu.Lock() - for k, cr := range r.m { - if now.After(cr.timeout) { - delete(r.m, k) - } - } - r.mu.Unlock() -} - -func newResolverCache() *resolverCache { - rc := &resolverCache{ - m: map[string]cachedResolver{}, - } - t := time.NewTicker(time.Minute) - go func() { - for { - rc.clean(<-t.C) - } - }() - return rc -} - -func ensureManifestRequested(ctx context.Context, res remotes.Resolver, ref string) { - cr, ok := res.(*cachedResolver) - if !ok { - return - } - if atomic.LoadInt64(&cr.counter) == 0 { - res.Resolve(ctx, ref) - } -} - func platformMatches(img *image.Image, p *ocispec.Platform) bool { if img.Architecture != p.Architecture { return false diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go index 5decff89f689b69cbd50b2896e7134b8515f739b..5aac0cb538c900c3a3c81209f82fa6ccb4428ec2 100644 --- a/builder/builder-next/controller.go +++ b/builder/builder-next/controller.go @@ -34,7 +34,7 @@ import ( "github.com/moby/buildkit/frontend/gateway/forwarder" containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" "github.com/moby/buildkit/solver/bboltcachestorage" - "github.com/moby/buildkit/util/binfmt_misc" + "github.com/moby/buildkit/util/archutil" "github.com/moby/buildkit/util/entitlements" "github.com/moby/buildkit/util/leaseutil" "github.com/moby/buildkit/worker" @@ -166,7 +166,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) { return nil, errors.Errorf("snapshotter doesn't support differ") } - p, err := parsePlatforms(binfmt_misc.SupportedPlatforms(true)) + p, err := parsePlatforms(archutil.SupportedPlatforms(true)) if err != nil { return nil, err } diff --git a/builder/builder-next/executor_unix.go b/builder/builder-next/executor_unix.go index d684b9f6e2e6ca818ef183c6d4b09ec61d29013c..c052ec707fec14e61677137c9da30a376830bea1 100644 --- a/builder/builder-next/executor_unix.go +++ b/builder/builder-next/executor_unix.go @@ -95,11 +95,11 @@ func (iface *lnInterface) init(c libnetwork.NetworkController, n libnetwork.Netw iface.ep = ep } -func (iface 
*lnInterface) Set(s *specs.Spec) { +func (iface *lnInterface) Set(s *specs.Spec) error { <-iface.ready if iface.err != nil { logrus.WithError(iface.err).Error("failed to set networking spec") - return + return iface.err } shortNetCtlrID := stringid.TruncateID(iface.provider.NetworkController.ID()) // attach netns to bridge within the container namespace, using reexec in a prestart hook @@ -109,6 +109,7 @@ func (iface *lnInterface) Set(s *specs.Spec) { Args: []string{"libnetwork-setkey", "-exec-root=" + iface.provider.Config().Daemon.ExecRoot, iface.sbx.ContainerID(), shortNetCtlrID}, }}, } + return nil } func (iface *lnInterface) Close() error { diff --git a/builder/builder-next/executor_windows.go b/builder/builder-next/executor_windows.go index 5f33bcbe44bed00a4ac71232a883189282e9d9df..f63d8aba9ec1d125c4b49c877ef0ded6ed192c65 100644 --- a/builder/builder-next/executor_windows.go +++ b/builder/builder-next/executor_windows.go @@ -7,7 +7,6 @@ import ( "github.com/docker/docker/daemon/config" "github.com/docker/docker/pkg/idtools" "github.com/docker/libnetwork" - "github.com/moby/buildkit/cache" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" ) @@ -19,7 +18,7 @@ func newExecutor(_, _ string, _ libnetwork.NetworkController, _ *oci.DNSConfig, type winExecutor struct { } -func (w *winExecutor) Run(ctx context.Context, id string, root cache.Mountable, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { +func (w *winExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { return errors.New("buildkit executor not implemented for windows") } diff --git a/builder/builder-next/exporter/writer.go b/builder/builder-next/exporter/writer.go index 64d260f230ec37fcbf7b298bb89a8b1e34f539dc..53305caf8543af7922d3e901e9f3ce8a10e7e57a 100644 --- a/builder/builder-next/exporter/writer.go +++ b/builder/builder-next/exporter/writer.go @@ -26,7 +26,7 @@ func emptyImageConfig() ([]byte, error) { } img.RootFS.Type = "layers" img.Config.WorkingDir = "/" - img.Config.Env = []string{"PATH=" + system.DefaultPathEnv} + img.Config.Env = []string{"PATH=" + system.DefaultPathEnvUnix} dt, err := json.Marshal(img) return dt, errors.Wrap(err, "failed to create empty image config") } diff --git a/builder/builder-next/worker/worker.go b/builder/builder-next/worker/worker.go index f8c3ef9dde61eed72be3bac495b964ea090f999c..76a4470fe01b52e0a39598431cd7918270bc75ad 100644 --- a/builder/builder-next/worker/worker.go +++ b/builder/builder-next/worker/worker.go @@ -33,13 +33,15 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/mounts" "github.com/moby/buildkit/solver/llbsolver/ops" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/source" "github.com/moby/buildkit/source/git" "github.com/moby/buildkit/source/http" "github.com/moby/buildkit/source/local" - "github.com/moby/buildkit/util/binfmt_misc" + "github.com/moby/buildkit/util/archutil" + "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/progress" digest "github.com/opencontainers/go-digest" @@ -147,7 +149,7 @@ func (w *Worker) Platforms(noCache bool) []ocispec.Platform { for _, p := range w.Opt.Platforms { pm[platforms.Format(p)] = struct{}{} } - for _, p := range binfmt_misc.SupportedPlatforms(noCache) { + for _, p := 
range archutil.SupportedPlatforms(noCache) { if _, ok := pm[p]; !ok { pp, _ := platforms.Parse(p) w.Opt.Platforms = append(w.Opt.Platforms, pp) @@ -170,13 +172,18 @@ func (w *Worker) ContentStore() content.Store { return w.Opt.ContentStore } +// MetadataStore returns the metadata store +func (w *Worker) MetadataStore() *metadata.Store { + return w.Opt.MetadataStore +} + // LoadRef loads a reference by ID -func (w *Worker) LoadRef(id string, hidden bool) (cache.ImmutableRef, error) { +func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) { var opts []cache.RefOption if hidden { opts = append(opts, cache.NoUpdateLastUsed) } - return w.CacheManager().Get(context.TODO(), id, opts...) + return w.CacheManager().Get(ctx, id, opts...) } // ResolveOp converts a LLB vertex into a LLB operation @@ -186,9 +193,9 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se case *pb.Op_Source: return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w) case *pb.Op_Exec: - return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), sm, w.MetadataStore, w.Executor(), w) + return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), sm, w.Opt.MetadataStore, w.Executor(), w) case *pb.Op_File: - return ops.NewFileOp(v, op, w.CacheManager(), w.MetadataStore, w) + return ops.NewFileOp(v, op, w.CacheManager(), w.Opt.MetadataStore, w) case *pb.Op_Build: return ops.NewBuildOp(v, op, s, w) } @@ -230,7 +237,7 @@ func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, } // GetRemote returns a remote snapshot reference for a local one -func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) { +func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool, _ compression.Type, _ session.Group) (*solver.Remote, error) { var diffIDs []layer.DiffID var err error if !createIfNeeded { @@ -265,13 +272,13 @@ func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIf // PruneCacheMounts removes the current cache snapshots for specified IDs func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error { - mu := ops.CacheMountsLocker() + mu := mounts.CacheMountsLocker() mu.Lock() defer mu.Unlock() for _, id := range ids { id = "cache-dir:" + id - sis, err := w.MetadataStore.Search(id) + sis, err := w.Opt.MetadataStore.Search(id) if err != nil { return err } @@ -300,7 +307,7 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error { } } - ops.ClearActiveCacheMounts() + mounts.ClearActiveCacheMounts() return nil } diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go index d8f48ae2c08d0a9797320bfd5f1f0d51fbd4dc61..a0bfb289c288e1ac89f411bafd174a03dae04084 100644 --- a/builder/dockerfile/builder.go +++ b/builder/dockerfile/builder.go @@ -235,8 +235,10 @@ func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *BuildA }); err != nil { return err } - args.AddArg(meta.Key, meta.Value) - args.AddMetaArg(meta.Key, meta.Value) + for _, arg := range meta.Args { + args.AddArg(arg.Key, arg.Value) + args.AddMetaArg(arg.Key, arg.Value) + } return nil } diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go index bf28a66a49ad80b3343efaf0c03eb6fe8a4bb49e..36335df952f6b4942cfabaea71ec63be2182ce06 100644 --- a/builder/dockerfile/dispatchers.go +++ b/builder/dockerfile/dispatchers.go @@ -587,14 +587,21 @@ func dispatchStopSignal(d 
dispatchRequest, c *instructions.StopSignalCommand) er // to builder using the --build-arg flag for expansion/substitution or passing to 'run'. // Dockerfile author may optionally set a default value of this variable. func dispatchArg(d dispatchRequest, c *instructions.ArgCommand) error { - - commitStr := "ARG " + c.Key - if c.Value != nil { - commitStr += "=" + *c.Value + var commitStr strings.Builder + commitStr.WriteString("ARG ") + for i, arg := range c.Args { + if i > 0 { + commitStr.WriteString(" ") + } + commitStr.WriteString(arg.Key) + if arg.Value != nil { + commitStr.WriteString("=") + commitStr.WriteString(*arg.Value) + } + d.state.buildArgs.AddArg(arg.Key, arg.Value) } - d.state.buildArgs.AddArg(c.Key, c.Value) - return d.builder.commit(d.state, commitStr) + return d.builder.commit(d.state, commitStr.String()) } // SHELL powershell -command diff --git a/builder/dockerfile/dispatchers_test.go b/builder/dockerfile/dispatchers_test.go index 03e0245c8cb8b1904f676d90eb61ffe6efdef617..d5f6bb96a988841ec9519b1c0e8461988e93e045 100644 --- a/builder/dockerfile/dispatchers_test.go +++ b/builder/dockerfile/dispatchers_test.go @@ -139,10 +139,10 @@ func TestFromWithArg(t *testing.T) { args := NewBuildArgs(make(map[string]*string)) val := "sometag" - metaArg := instructions.ArgCommand{KeyValuePairOptional: instructions.KeyValuePairOptional{ + metaArg := instructions.ArgCommand{Args: []instructions.KeyValuePairOptional{{ Key: "THETAG", Value: &val, - }} + }}} cmd := &instructions.Stage{ BaseName: "alpine:${THETAG}", } @@ -395,7 +395,7 @@ func TestArg(t *testing.T) { argName := "foo" argVal := "bar" - cmd := &instructions.ArgCommand{KeyValuePairOptional: instructions.KeyValuePairOptional{Key: argName, Value: &argVal}} + cmd := &instructions.ArgCommand{Args: []instructions.KeyValuePairOptional{{Key: argName, Value: &argVal}}} err := dispatch(sb, cmd) assert.NilError(t, err) diff --git a/builder/dockerignore/deprecated.go b/builder/dockerignore/deprecated.go new file mode 100644 index 0000000000000000000000000000000000000000..e387cc8ed24d9bdf1322110f8218d563a3228754 --- /dev/null +++ b/builder/dockerignore/deprecated.go @@ -0,0 +1,17 @@ +// Package dockerignore is deprecated. Use github.com/moby/buildkit/frontend/dockerfile/dockerignore instead. +package dockerignore + +import ( + "io" + + "github.com/moby/buildkit/frontend/dockerfile/dockerignore" +) + +// ReadAll reads a .dockerignore file and returns the list of file patterns +// to ignore. Note this will trim whitespace from each line as well +// as use GO's "clean" func to get the shortest/cleanest path for each. +// +// Deprecated: use github.com/moby/buildkit/frontend/dockerfile/dockerignore.ReadAll instead. 
+func ReadAll(reader io.Reader) ([]string, error) { + return dockerignore.ReadAll(reader) +} diff --git a/builder/dockerignore/dockerignore_test.go b/builder/dockerignore/dockerignore_test.go deleted file mode 100644 index 655bd6f3321b0280895c443cd3350689c1d30d78..0000000000000000000000000000000000000000 --- a/builder/dockerignore/dockerignore_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package dockerignore // import "github.com/docker/docker/builder/dockerignore" - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestReadAll(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "dockerignore-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - di, err := ReadAll(nil) - if err != nil { - t.Fatalf("Expected not to have error, got %v", err) - } - - if diLen := len(di); diLen != 0 { - t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) - } - - diName := filepath.Join(tmpDir, ".dockerignore") - content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile\n# this is a comment\n! /inverted/abs/path\n!\n! \n") - err = ioutil.WriteFile(diName, []byte(content), 0777) - if err != nil { - t.Fatal(err) - } - - diFd, err := os.Open(diName) - if err != nil { - t.Fatal(err) - } - defer diFd.Close() - - di, err = ReadAll(diFd) - if err != nil { - t.Fatal(err) - } - - if len(di) != 7 { - t.Fatalf("Expected 7 entries, got %v", len(di)) - } - if di[0] != "test1" { - t.Fatal("First element is not test1") - } - if di[1] != "test2" { // according to https://docs.docker.com/engine/reference/builder/#dockerignore-file, /foo/bar should be treated as foo/bar - t.Fatal("Second element is not test2") - } - if di[2] != "a/file/here" { // according to https://docs.docker.com/engine/reference/builder/#dockerignore-file, /foo/bar should be treated as foo/bar - t.Fatal("Third element is not a/file/here") - } - if di[3] != "lastfile" { - t.Fatal("Fourth element is not lastfile") - } - if di[4] != "!inverted/abs/path" { - t.Fatal("Fifth element is not !inverted/abs/path") - } - if di[5] != "!" { - t.Fatalf("Sixth element is not !, but %s", di[5]) - } - if di[6] != "!" 
{ - t.Fatalf("Seventh element is not !, but %s", di[6]) - } -} diff --git a/builder/remotecontext/detect.go b/builder/remotecontext/detect.go index 251fd089318291eab607c666d7f8e50d0366ed66..9b126ef7750bd7ff75ad778e56647ca810f14f38 100644 --- a/builder/remotecontext/detect.go +++ b/builder/remotecontext/detect.go @@ -11,10 +11,10 @@ import ( "github.com/containerd/continuity/driver" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerignore" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/urlutil" + "github.com/moby/buildkit/frontend/dockerfile/dockerignore" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/pkg/errors" "github.com/sirupsen/logrus" diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go index 1acc49106b177849b54cb8a3065bd32156770e83..c7dc934d624e2c4977c3801825b6501f9322cbe8 100644 --- a/integration-cli/docker_cli_build_test.go +++ b/integration-cli/docker_cli_build_test.go @@ -5608,30 +5608,6 @@ func (s *DockerSuite) TestBuildWithExtraHostInvalidFormat(c *testing.T) { } -func (s *DockerSuite) TestBuildContChar(c *testing.T) { - name := "testbuildcontchar" - - buildImage(name, build.WithDockerfile(`FROM busybox\`)).Assert(c, icmd.Expected{ - Out: "Step 1/1 : FROM busybox", - }) - - result := buildImage(name, build.WithDockerfile(`FROM busybox - RUN echo hi \`)) - result.Assert(c, icmd.Success) - assert.Assert(c, strings.Contains(result.Combined(), "Step 1/2 : FROM busybox")) - assert.Assert(c, strings.Contains(result.Combined(), "Step 2/2 : RUN echo hi\n")) - result = buildImage(name, build.WithDockerfile(`FROM busybox - RUN echo hi \\`)) - result.Assert(c, icmd.Success) - assert.Assert(c, strings.Contains(result.Combined(), "Step 1/2 : FROM busybox")) - assert.Assert(c, strings.Contains(result.Combined(), "Step 2/2 : RUN echo hi \\\n")) - result = buildImage(name, build.WithDockerfile(`FROM busybox - RUN echo hi \\\`)) - result.Assert(c, icmd.Success) - assert.Assert(c, strings.Contains(result.Combined(), "Step 1/2 : FROM busybox")) - assert.Assert(c, strings.Contains(result.Combined(), "Step 2/2 : RUN echo hi \\\\\n")) -} - func (s *DockerSuite) TestBuildMultiStageCopyFromSyntax(c *testing.T) { dockerfile := ` FROM busybox AS first diff --git a/vendor.conf b/vendor.conf index 4596d960d5195b0af5ffa096eeed79a4f5d59d08..f389f5e7677f52b0e1f1c218171a84129289c60e 100644 --- a/vendor.conf +++ b/vendor.conf @@ -33,13 +33,14 @@ github.com/imdario/mergo 1afb36080aec31e0d1528973ebe6 golang.org/x/sync cd5d95a43a6e21273425c7ae415d3df9ea832eeb # buildkit -github.com/moby/buildkit 4d1f260e8490ec438ab66e08bb105577aca0ce06 -github.com/tonistiigi/fsutil ae3a8d753069d0f76fbee396457e8b6cfd7cb8c3 +github.com/moby/buildkit 6861f17f15364de0fe1fd1e6e8da07598a485123 +github.com/tonistiigi/fsutil c3ed55f3b48161fd3dc42c17ba09e12ac52d57dc +github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2 github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746 -github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7 +github.com/opentracing/opentracing-go d34af3eaa63c4d08ab54863a4bdd0daa45212e12 # v1.2.0 github.com/google/shlex e7afc7fbc51079733e9468cdfd1efcd7d196cd1d -github.com/opentracing-contrib/go-stdlib b1a47cfbdd7543e70e9ef3e73d0802ad306cc1cc -github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b +github.com/opentracing-contrib/go-stdlib 
8a6ff1ad1691a29e4f7b5d46604f97634997c8c4 # v1.0.0 +github.com/mitchellh/hashstructure a38c50148365edc8df43c1580c48fb2b3a1e9cd7 # v1.0.0 github.com/gofrs/flock 6caa7350c26b838538005fae7dbee4e69d9398db # v0.7.3 github.com/grpc-ecosystem/go-grpc-middleware 3c51f7f332123e8be5a157c0802a228ac85bf9db # v1.2.0 @@ -129,12 +130,12 @@ github.com/googleapis/gax-go bd5b16380fd03dc758d11cef74ba google.golang.org/genproto 3f1135a288c9a07e340ae8ba4cc6c7065a3160e8 # containerd -github.com/containerd/containerd c623d1b36f09f8ef6536a057bd658b3aa8632828 # v1.4.1 +github.com/containerd/containerd d4e78200d6da62480c85bf6f26b7221ea938f396 github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165 github.com/containerd/cgroups 318312a373405e5e91134d8063d04d59768a1bff github.com/containerd/console 5d7e1412f07b502a01029ea20e20e0d2be31fa7c # v1.0.1 -github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076ebd565c4e8df0c +github.com/containerd/go-runc 16b287bc67d069a60fa48db15f330b790b74365b github.com/containerd/typeurl cd3ce7159eae562a4f60ceff37dada11a939d247 # v1.0.1 github.com/containerd/ttrpc 72bb1b21c5b0a4a107f59dd85f6ab58e564b68d6 # v1.0.1 github.com/gogo/googleapis 01e0f9cca9b92166042241267ee2a5cdf5cff46c # v1.3.2 diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md index a973d5156ea2080631b343b7a437fa6bdbc78f38..320c1121aeb9cbdb9ff4b534e4f485f1d26b79ef 100644 --- a/vendor/github.com/containerd/containerd/README.md +++ b/vendor/github.com/containerd/containerd/README.md @@ -10,10 +10,24 @@ containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc. +containerd is a member of CNCF with ['graduated'](https://landscape.cncf.io/selected=containerd) status. + containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users. ![architecture](design/architecture.png) +## Now Recruiting + +We are a large inclusive OSS project that is welcoming help of any kind shape or form: +* Documentation help is needed to make the product easier to consume and extend. +* We need OSS community outreach / organizing help to get the word out; manage +and create messaging and educational content; and to help with social media, community forums/groups, and google groups. +* We are actively inviting new [security advisors](https://github.com/containerd/project/blob/master/GOVERNANCE.md#security-advisors) to join the team. +* New sub-projects are being created, core and non-core that could use additional development help. +* Each of the [containerd projects](https://github.com/containerd) has a list of issues currently being worked on or that need help resolving. + - If the issue has not already been assigned to someone, or has not made recent progress and you are interested, please inquire. 
+ - If you are interested in starting with a smaller / beginner level issue, look for issues with an `exp/beginner` tag, for example [containerd/containerd beginner issues.](https://github.com/containerd/containerd/issues?q=is%3Aissue+is%3Aopen+label%3Aexp%2Fbeginner) + ## Getting Started See our documentation on [containerd.io](https://containerd.io): @@ -250,10 +264,7 @@ loaded for the user's shell environment. For async communication and long running discussions please use issues and pull requests on the github repo. This will be the best place to discuss design and implementation. -For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development. - -**Slack:** Catch us in the #containerd and #containerd-dev channels on dockercommunity.slack.com. -[Click here for an invite to docker community slack.](https://dockr.ly/slack) +For sync communication catch us in the `#containerd` and `#containerd-dev` slack channels on Cloud Native Computing Foundation's (CNCF) slack - `cloud-native.slack.com`. Everyone is welcome to join and chat. [Get Invite to CNCF slack.](https://slack.cncf.io) ### Security audit diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go new file mode 100644 index 0000000000000000000000000000000000000000..7a05e3933ea85718b77487cac2be579e873b9009 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go @@ -0,0 +1,206 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package auth + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + "strings" + "time" + + "github.com/containerd/containerd/log" + remoteserrors "github.com/containerd/containerd/remotes/errors" + "github.com/pkg/errors" + "golang.org/x/net/context/ctxhttp" +) + +var ( + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. 
+ ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +// GenerateTokenOptions generates options for fetching a token based on a challenge +func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) { + realm, ok := c.Parameters["realm"] + if !ok { + return TokenOptions{}, errors.New("no realm specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return TokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") + } + + to := TokenOptions{ + Realm: realmURL.String(), + Service: c.Parameters["service"], + Username: username, + Secret: secret, + } + + scope, ok := c.Parameters["scope"] + if ok { + to.Scopes = append(to.Scopes, scope) + } else { + log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") + } + + return to, nil +} + +// TokenOptions are optios for requesting a token +type TokenOptions struct { + Realm string + Service string + Scopes []string + Username string + Secret string +} + +// OAuthTokenResponse is response from fetching token with a OAuth POST request +type OAuthTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +// FetchTokenWithOAuth fetches a token using a POST request +func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) { + form := url.Values{} + if len(to.Scopes) > 0 { + form.Set("scope", strings.Join(to.Scopes, " ")) + } + form.Set("service", to.Service) + form.Set("client_id", clientID) + + if to.Username == "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", to.Secret) + } else { + form.Set("grant_type", "password") + form.Set("username", to.Username) + form.Set("password", to.Secret) + } + + req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + if headers != nil { + for k, v := range headers { + req.Header[k] = append(req.Header[k], v...) + } + } + + resp, err := ctxhttp.Do(ctx, client, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) + } + + decoder := json.NewDecoder(resp.Body) + + var tr OAuthTokenResponse + if err = decoder.Decode(&tr); err != nil { + return nil, errors.Wrap(err, "unable to decode token response") + } + + if tr.AccessToken == "" { + return nil, errors.WithStack(ErrNoToken) + } + + return &tr, nil +} + +// FetchTokenResponse is response from fetching token with GET request +type FetchTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +// FetchToken fetches a token using a GET request +func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) { + req, err := http.NewRequest("GET", to.Realm, nil) + if err != nil { + return nil, err + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = append(req.Header[k], v...) 
+ } + } + + reqParams := req.URL.Query() + + if to.Service != "" { + reqParams.Add("service", to.Service) + } + + for _, scope := range to.Scopes { + reqParams.Add("scope", scope) + } + + if to.Secret != "" { + req.SetBasicAuth(to.Username, to.Secret) + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := ctxhttp.Do(ctx, client, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) + } + + decoder := json.NewDecoder(resp.Body) + + var tr FetchTokenResponse + if err = decoder.Decode(&tr); err != nil { + return nil, errors.Wrap(err, "unable to decode token response") + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken + } + + if tr.Token == "" { + return nil, errors.WithStack(ErrNoToken) + } + + return &tr, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go similarity index 81% rename from vendor/github.com/containerd/containerd/remotes/docker/auth.go rename to vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go index 70cfdea4f3c61e5341f9ebdf8024c4d64558dbd5..223fa2d0524f03babcde2e0b1277d486af90d5e1 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/auth.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go @@ -14,7 +14,7 @@ limitations under the License. */ -package docker +package auth import ( "net/http" @@ -22,31 +22,35 @@ import ( "strings" ) -type authenticationScheme byte +// AuthenticationScheme defines scheme of the authentication method +type AuthenticationScheme byte const ( - basicAuth authenticationScheme = 1 << iota // Defined in RFC 7617 - digestAuth // Defined in RFC 7616 - bearerAuth // Defined in RFC 6750 + // BasicAuth is scheme for Basic HTTP Authentication RFC 7617 + BasicAuth AuthenticationScheme = 1 << iota + // DigestAuth is scheme for HTTP Digest Access Authentication RFC 7616 + DigestAuth + // BearerAuth is scheme for OAuth 2.0 Bearer Tokens RFC 6750 + BearerAuth ) -// challenge carries information from a WWW-Authenticate response header. +// Challenge carries information from a WWW-Authenticate response header. // See RFC 2617. -type challenge struct { +type Challenge struct { // scheme is the auth-scheme according to RFC 2617 - scheme authenticationScheme + Scheme AuthenticationScheme // parameters are the auth-params according to RFC 2617 - parameters map[string]string + Parameters map[string]string } -type byScheme []challenge +type byScheme []Challenge func (bs byScheme) Len() int { return len(bs) } func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] } // Sort in priority order: token > digest > basic -func (bs byScheme) Less(i, j int) bool { return bs[i].scheme > bs[j].scheme } +func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme } // Octet types from RFC 2616. 
type octetType byte @@ -90,22 +94,23 @@ func init() { } } -func parseAuthHeader(header http.Header) []challenge { - challenges := []challenge{} +// ParseAuthHeader parses challenges from WWW-Authenticate header +func ParseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { v, p := parseValueAndParams(h) - var s authenticationScheme + var s AuthenticationScheme switch v { case "basic": - s = basicAuth + s = BasicAuth case "digest": - s = digestAuth + s = DigestAuth case "bearer": - s = bearerAuth + s = BearerAuth default: continue } - challenges = append(challenges, challenge{scheme: s, parameters: p}) + challenges = append(challenges, Challenge{Scheme: s, Parameters: p}) } sort.Stable(byScheme(challenges)) return challenges diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index 001423a0d1ceb33febc350034f6259094554d349..67e4aea8da8f5f75ef279e15f76becada2b7888c 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -19,21 +19,17 @@ package docker import ( "context" "encoding/base64" - "encoding/json" "fmt" - "io" - "io/ioutil" "net/http" - "net/url" "strings" "sync" - "time" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes/docker/auth" + remoteerrors "github.com/containerd/containerd/remotes/errors" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "golang.org/x/net/context/ctxhttp" ) type dockerAuthorizer struct { @@ -135,8 +131,8 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R a.mu.Lock() defer a.mu.Unlock() - for _, c := range parseAuthHeader(last.Header) { - if c.scheme == bearerAuth { + for _, c := range auth.ParseAuthHeader(last.Header) { + if c.Scheme == auth.BearerAuth { if err := invalidAuthorization(c, responses); err != nil { delete(a.handlers, host) return err @@ -152,26 +148,35 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R return nil } - common, err := a.generateTokenOptions(ctx, host, c) + var username, secret string + if a.credentials != nil { + var err error + username, secret, err = a.credentials(host) + if err != nil { + return err + } + } + + common, err := auth.GenerateTokenOptions(ctx, host, username, secret, c) if err != nil { return err } - a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) + a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) return nil - } else if c.scheme == basicAuth && a.credentials != nil { + } else if c.Scheme == auth.BasicAuth && a.credentials != nil { username, secret, err := a.credentials(host) if err != nil { return err } if username != "" && secret != "" { - common := tokenOptions{ - username: username, - secret: secret, + common := auth.TokenOptions{ + Username: username, + Secret: secret, } - a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) + a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) return nil } } @@ -179,38 +184,6 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") } -func (a *dockerAuthorizer) generateTokenOptions(ctx context.Context, host string, c challenge) (tokenOptions, 
error) { - realm, ok := c.parameters["realm"] - if !ok { - return tokenOptions{}, errors.New("no realm specified for token auth challenge") - } - - realmURL, err := url.Parse(realm) - if err != nil { - return tokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") - } - - to := tokenOptions{ - realm: realmURL.String(), - service: c.parameters["service"], - } - - scope, ok := c.parameters["scope"] - if ok { - to.scopes = append(to.scopes, scope) - } else { - log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") - } - - if a.credentials != nil { - to.username, to.secret, err = a.credentials(host) - if err != nil { - return tokenOptions{}, err - } - } - return to, nil -} - // authResult is used to control limit rate. type authResult struct { sync.WaitGroup @@ -227,17 +200,17 @@ type authHandler struct { client *http.Client // only support basic and bearer schemes - scheme authenticationScheme + scheme auth.AuthenticationScheme // common contains common challenge answer - common tokenOptions + common auth.TokenOptions // scopedTokens caches token indexed by scopes, which used in // bearer auth case scopedTokens map[string]*authResult } -func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationScheme, opts tokenOptions) *authHandler { +func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.AuthenticationScheme, opts auth.TokenOptions) *authHandler { return &authHandler{ header: hdr, client: client, @@ -249,17 +222,17 @@ func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationS func (ah *authHandler) authorize(ctx context.Context) (string, error) { switch ah.scheme { - case basicAuth: + case auth.BasicAuth: return ah.doBasicAuth(ctx) - case bearerAuth: + case auth.BearerAuth: return ah.doBearerAuth(ctx) default: - return "", errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") + return "", errors.Wrapf(errdefs.ErrNotImplemented, "failed to find supported auth scheme: %s", string(ah.scheme)) } } func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { - username, secret := ah.common.username, ah.common.secret + username, secret := ah.common.Username, ah.common.Secret if username == "" || secret == "" { return "", fmt.Errorf("failed to handle basic auth because missing username or secret") @@ -269,14 +242,14 @@ func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { return fmt.Sprintf("Basic %s", auth), nil } -func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) { +func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err error) { // copy common tokenOptions to := ah.common - to.scopes = GetTokenScopes(ctx, to.scopes) + to.Scopes = GetTokenScopes(ctx, to.Scopes) // Docs: https://docs.docker.com/registry/spec/auth/scope - scoped := strings.Join(to.scopes, " ") + scoped := strings.Join(to.Scopes, " ") ah.Lock() if r, exist := ah.scopedTokens[scoped]; exist { @@ -291,174 +264,52 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) { ah.scopedTokens[scoped] = r ah.Unlock() + defer func() { + token = fmt.Sprintf("Bearer %s", token) + r.token, r.err = token, err + r.Done() + }() + // fetch token for the resource scope - var ( - token string - err error - ) - if to.secret != "" { + if to.Secret != "" { + defer func() { + err = errors.Wrap(err, "failed to fetch oauth token") + }() // credential information is provided, use oauth POST endpoint - token, err = 
ah.fetchTokenWithOAuth(ctx, to) - err = errors.Wrap(err, "failed to fetch oauth token") - } else { - // do request anonymously - token, err = ah.fetchToken(ctx, to) - err = errors.Wrap(err, "failed to fetch anonymous token") - } - token = fmt.Sprintf("Bearer %s", token) - - r.token, r.err = token, err - r.Done() - return r.token, r.err -} - -type tokenOptions struct { - realm string - service string - scopes []string - username string - secret string -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (ah *authHandler) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) { - form := url.Values{} - if len(to.scopes) > 0 { - form.Set("scope", strings.Join(to.scopes, " ")) - } - form.Set("service", to.service) - // TODO: Allow setting client_id - form.Set("client_id", "containerd-client") - - if to.username == "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", to.secret) - } else { - form.Set("grant_type", "password") - form.Set("username", to.username) - form.Set("password", to.secret) - } - - req, err := http.NewRequest("POST", to.realm, strings.NewReader(form.Encode())) - if err != nil { - return "", err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - if ah.header != nil { - for k, v := range ah.header { - req.Header[k] = append(req.Header[k], v...) - } - } - - resp, err := ctxhttp.Do(ctx, ah.client, req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - // Registries without support for POST may return 404 for POST /v2/token. - // As of September 2017, GCR is known to return 404. - // As of February 2018, JFrog Artifactory is known to return 401. - if (resp.StatusCode == 405 && to.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 { - return ah.fetchToken(ctx, to) - } else if resp.StatusCode < 200 || resp.StatusCode >= 400 { - b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB - log.G(ctx).WithFields(logrus.Fields{ - "status": resp.Status, - "body": string(b), - }).Debugf("token request failed") - // TODO: handle error body and write debug output - return "", errors.Errorf("unexpected status: %s", resp.Status) - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", fmt.Errorf("unable to decode token response: %s", err) - } - - return tr.AccessToken, nil -} - -type getTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -// fetchToken fetches a token using a GET request -func (ah *authHandler) fetchToken(ctx context.Context, to tokenOptions) (string, error) { - req, err := http.NewRequest("GET", to.realm, nil) - if err != nil { - return "", err - } - - if ah.header != nil { - for k, v := range ah.header { - req.Header[k] = append(req.Header[k], v...) + // TODO: Allow setting client_id + resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, ah.header, "containerd-client", to) + if err != nil { + var errStatus remoteerrors.ErrUnexpectedStatus + if errors.As(err, &errStatus) { + // Registries without support for POST may return 404 for POST /v2/token. + // As of September 2017, GCR is known to return 404. 
+ // As of February 2018, JFrog Artifactory is known to return 401. + if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 { + resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) + if err != nil { + return "", err + } + return resp.Token, nil + } + log.G(ctx).WithFields(logrus.Fields{ + "status": errStatus.Status, + "body": string(errStatus.Body), + }).Debugf("token request failed") + } + return "", err } + return resp.AccessToken, nil } - - reqParams := req.URL.Query() - - if to.service != "" { - reqParams.Add("service", to.service) - } - - for _, scope := range to.scopes { - reqParams.Add("scope", scope) - } - - if to.secret != "" { - req.SetBasicAuth(to.username, to.secret) - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := ctxhttp.Do(ctx, ah.client, req) + // do request anonymously + resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) if err != nil { - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - // TODO: handle error body and write debug output - return "", errors.Errorf("unexpected status: %s", resp.Status) + return "", errors.Wrap(err, "failed to fetch anonymous token") } - - decoder := json.NewDecoder(resp.Body) - - var tr getTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", fmt.Errorf("unable to decode token response: %s", err) - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return "", ErrNoToken - } - - return tr.Token, nil + return resp.Token, nil } -func invalidAuthorization(c challenge, responses []*http.Response) error { - errStr := c.parameters["error"] +func invalidAuthorization(c auth.Challenge, responses []*http.Response) error { + errStr := c.Parameters["error"] if errStr == "" { return nil } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index 98ea515d591eabc7a28afd520548d7abcec6792f..13fc136859ece908115df3d0836824d09a3bcfcf 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -30,6 +30,7 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes" + remoteserrors "github.com/containerd/containerd/remotes/errors" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -112,8 +113,9 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) } } else if resp.StatusCode != http.StatusNotFound { - // TODO: log error - return nil, errors.Errorf("unexpected response: %s", resp.Status) + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + return nil, err } } @@ -166,8 +168,9 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten }) return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) default: - // TODO: log error - return nil, errors.Errorf("unexpected 
response: %s", resp.Status) + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + return nil, err } var ( @@ -244,8 +247,9 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten switch resp.StatusCode { case http.StatusOK, http.StatusCreated, http.StatusNoContent: default: - // TODO: log error - pr.CloseWithError(errors.Errorf("unexpected response: %s", resp.Status)) + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + pr.CloseWithError(err) } respC <- resp }() diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 53e42ecc5a5cfc3ca8599809fb067f6b3649282c..07640f23a27340425cce959f065453ab7b3e37e7 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -41,10 +41,6 @@ import ( ) var ( - // ErrNoToken is returned if a request is successful but the body does not - // contain an authorization token. - ErrNoToken = errors.New("authorization server did not include a token in the response") - // ErrInvalidAuthorization is used when credentials are passed to a server but // those credentials are rejected. ErrInvalidAuthorization = errors.New("authorization failed") diff --git a/vendor/github.com/containerd/containerd/remotes/errors/errors.go b/vendor/github.com/containerd/containerd/remotes/errors/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..e58e4afea84cab98ff5de1cff059a7b085fcb070 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/errors/errors.go @@ -0,0 +1,46 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package errors + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" +) + +var _ error = ErrUnexpectedStatus{} + +// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status +type ErrUnexpectedStatus struct { + Status string + StatusCode int + Body []byte +} + +func (e ErrUnexpectedStatus) Error() string { + return fmt.Sprintf("unexpected status: %s", e.Status) +} + +// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response +func NewUnexpectedStatusErr(resp *http.Response) error { + var b []byte + if resp.Body != nil { + b, _ = ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB + } + return ErrUnexpectedStatus{Status: resp.Status, StatusCode: resp.StatusCode, Body: b} +} diff --git a/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go b/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go index 9653454afcb68fb91a7a297d557ec8558895b142..562ee6ca487c8661a67aa078c1d6d13104d5cf4f 100644 --- a/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go +++ b/vendor/github.com/containerd/containerd/runtime/v1/shim/client/client.go @@ -22,7 +22,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "net" "os" "os/exec" @@ -68,24 +67,22 @@ func WithStart(binary, address, daemonAddress, cgroup string, debug bool, exitHa } defer f.Close() - stdoutCopy := ioutil.Discard - stderrCopy := ioutil.Discard - stdoutLog, err := v1.OpenShimStdoutLog(ctx, config.WorkDir) - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to create stdout log") - } - - stderrLog, err := v1.OpenShimStderrLog(ctx, config.WorkDir) - if err != nil { - return nil, nil, errors.Wrapf(err, "failed to create stderr log") - } + var stdoutLog io.ReadWriteCloser + var stderrLog io.ReadWriteCloser if debug { - stdoutCopy = os.Stdout - stderrCopy = os.Stderr - } + stdoutLog, err = v1.OpenShimStdoutLog(ctx, config.WorkDir) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to create stdout log") + } + + stderrLog, err = v1.OpenShimStderrLog(ctx, config.WorkDir) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to create stderr log") + } - go io.Copy(stdoutCopy, stdoutLog) - go io.Copy(stderrCopy, stderrLog) + go io.Copy(os.Stdout, stdoutLog) + go io.Copy(os.Stderr, stderrLog) + } cmd, err := newCommand(binary, daemonAddress, debug, config, f, stdoutLog, stderrLog) if err != nil { diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf index 59ec791489a50d794023510a05c888ade6f8f97f..ad72fb6d2c110c74807e116881b81fc2ef9527cc 100644 --- a/vendor/github.com/containerd/containerd/vendor.conf +++ b/vendor/github.com/containerd/containerd/vendor.conf @@ -7,6 +7,7 @@ github.com/containerd/console v1.0.0 github.com/containerd/continuity efbc4488d8fe1bdc16bde3b2d2990d9b3a899165 github.com/containerd/fifo f15a3290365b9d2627d189e619ab4008e0069caf github.com/containerd/go-runc 7016d3ce2328dd2cb1192b2076ebd565c4e8df0c +github.com/containerd/nri 0afc7f031eaf9c7d9c1a381b7ab5462e89c998fc github.com/containerd/ttrpc v1.0.1 github.com/containerd/typeurl v1.0.1 github.com/coreos/go-systemd/v22 v22.1.0 @@ -57,7 +58,7 @@ gotest.tools/v3 v3.0.2 github.com/cilium/ebpf 1c8d4c9ef7759622653a1d319284a44652333b28 # cri dependencies -github.com/containerd/cri 4e6644c8cf7fb825f62e0007421b7d83dfeab5a1 # master +github.com/containerd/cri 35e623e6bf7512e8c82b8ac6052cb1d720189f28 # master github.com/davecgh/go-spew v1.1.1 github.com/docker/docker 
4634ce647cf2ce2c6031129ccd109e557244986f github.com/docker/spdystream 449fdfce4d962303d702fec724ef0ad181c92528 diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index 9a2354028867133baf715b52e799798332199340..3eb8c902bf47716141fa6428e5b408475bc374d5 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.4.1+unknown" + Version = "1.4.0+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/go-runc/go.mod b/vendor/github.com/containerd/go-runc/go.mod index d833ee160217c03d392a39ecf29e637b006b9d7a..f69c26fd68fb3173a5480fa9e2accca1115eeabc 100644 --- a/vendor/github.com/containerd/go-runc/go.mod +++ b/vendor/github.com/containerd/go-runc/go.mod @@ -3,8 +3,9 @@ module github.com/containerd/go-runc go 1.13 require ( - github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e - github.com/opencontainers/runtime-spec v1.0.1 - github.com/pkg/errors v0.8.1 - golang.org/x/sys v0.0.0-20191210023423-ac6580df4449 + github.com/containerd/console v1.0.1 + github.com/opencontainers/runtime-spec v1.0.2 + github.com/pkg/errors v0.9.1 + github.com/sirupsen/logrus v1.7.0 + golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f ) diff --git a/vendor/github.com/containerd/go-runc/io_unix.go b/vendor/github.com/containerd/go-runc/io_unix.go index 567cd072e5f8878a00aeb3f619b475f162ac2ecc..ccf1dd490d9ecc479a5068d86d3d458d438be0e5 100644 --- a/vendor/github.com/containerd/go-runc/io_unix.go +++ b/vendor/github.com/containerd/go-runc/io_unix.go @@ -20,7 +20,9 @@ package runc import ( "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sys/unix" + "runtime" ) // NewPipeIO creates pipe pairs to be used with runc @@ -47,7 +49,13 @@ func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) { } pipes = append(pipes, stdin) if err = unix.Fchown(int(stdin.r.Fd()), uid, gid); err != nil { - return nil, errors.Wrap(err, "failed to chown stdin") + // TODO: revert with proper darwin solution, skipping for now + // as darwin chown is returning EINVAL on anonymous pipe + if runtime.GOOS == "darwin" { + logrus.WithError(err).Debug("failed to chown stdin, ignored") + } else { + return nil, errors.Wrap(err, "failed to chown stdin") + } } } if option.OpenStdout { @@ -56,7 +64,13 @@ func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) { } pipes = append(pipes, stdout) if err = unix.Fchown(int(stdout.w.Fd()), uid, gid); err != nil { - return nil, errors.Wrap(err, "failed to chown stdout") + // TODO: revert with proper darwin solution, skipping for now + // as darwin chown is returning EINVAL on anonymous pipe + if runtime.GOOS == "darwin" { + logrus.WithError(err).Debug("failed to chown stdout, ignored") + } else { + return nil, errors.Wrap(err, "failed to chown stdout") + } } } if option.OpenStderr { @@ -65,7 +79,13 @@ func NewPipeIO(uid, gid int, opts ...IOOpt) (i IO, err error) { } pipes = append(pipes, stderr) if err = unix.Fchown(int(stderr.w.Fd()), uid, gid); err != nil { - return nil, errors.Wrap(err, "failed to chown stderr") + // TODO: revert with proper darwin solution, skipping for now + // as darwin chown is returning EINVAL on anonymous pipe + if runtime.GOOS == "darwin" { 
+ logrus.WithError(err).Debug("failed to chown stderr, ignored") + } else { + return nil, errors.Wrap(err, "failed to chown stderr") + } } } return &pipeIO{ diff --git a/vendor/github.com/containerd/go-runc/runc.go b/vendor/github.com/containerd/go-runc/runc.go index c3a95af25486029f4c379825daf2ecf9dd2c6a2a..f5f03ae95eb4ba91835d865bc4d7a5d7448a0dd1 100644 --- a/vendor/github.com/containerd/go-runc/runc.go +++ b/vendor/github.com/containerd/go-runc/runc.go @@ -29,7 +29,6 @@ import ( "path/filepath" "strconv" "strings" - "syscall" "time" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -55,24 +54,9 @@ const ( DefaultCommand = "runc" ) -// Runc is the client to the runc cli -type Runc struct { - //If command is empty, DefaultCommand is used - Command string - Root string - Debug bool - Log string - LogFormat Format - PdeathSignal syscall.Signal - Setpgid bool - Criu string - SystemdCgroup bool - Rootless *bool // nil stands for "auto" -} - // List returns all containers created inside the provided runc root directory func (r *Runc) List(context context.Context) ([]*Container, error) { - data, err := cmdOutput(r.command(context, "list", "--format=json"), false) + data, err := cmdOutput(r.command(context, "list", "--format=json"), false, nil) defer putBuf(data) if err != nil { return nil, err @@ -86,7 +70,7 @@ func (r *Runc) List(context context.Context) ([]*Container, error) { // State returns the state for the container provided by id func (r *Runc) State(context context.Context, id string) (*Container, error) { - data, err := cmdOutput(r.command(context, "state", id), true) + data, err := cmdOutput(r.command(context, "state", id), true, nil) defer putBuf(data) if err != nil { return nil, fmt.Errorf("%s: %s", err, data.String()) @@ -111,6 +95,7 @@ type CreateOpts struct { NoPivot bool NoNewKeyring bool ExtraFiles []*os.File + Started chan<- int } func (o *CreateOpts) args() (out []string, err error) { @@ -156,7 +141,7 @@ func (r *Runc) Create(context context.Context, id, bundle string, opts *CreateOp cmd.ExtraFiles = opts.ExtraFiles if cmd.Stdout == nil && cmd.Stderr == nil { - data, err := cmdOutput(cmd, true) + data, err := cmdOutput(cmd, true, nil) defer putBuf(data) if err != nil { return fmt.Errorf("%s: %s", err, data.String()) @@ -176,7 +161,7 @@ func (r *Runc) Create(context context.Context, id, bundle string, opts *CreateOp } status, err := Monitor.Wait(cmd, ec) if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) + err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) } return err } @@ -191,6 +176,7 @@ type ExecOpts struct { PidFile string ConsoleSocket ConsoleSocket Detach bool + Started chan<- int } func (o *ExecOpts) args() (out []string, err error) { @@ -210,9 +196,12 @@ func (o *ExecOpts) args() (out []string, err error) { return out, nil } -// Exec executres and additional process inside the container based on a full +// Exec executes an additional process inside the container based on a full // OCI Process specification func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts *ExecOpts) error { + if opts.Started != nil { + defer close(opts.Started) + } f, err := ioutil.TempFile(os.Getenv("XDG_RUNTIME_DIR"), "runc-process") if err != nil { return err @@ -236,10 +225,10 @@ func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts opts.Set(cmd) } if cmd.Stdout == nil && cmd.Stderr == nil { - data, err := cmdOutput(cmd, true) + data, err := 
cmdOutput(cmd, true, opts.Started) defer putBuf(data) if err != nil { - return fmt.Errorf("%s: %s", err, data.String()) + return fmt.Errorf("%w: %s", err, data.String()) } return nil } @@ -247,6 +236,9 @@ func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts if err != nil { return err } + if opts.Started != nil { + opts.Started <- cmd.Process.Pid + } if opts != nil && opts.IO != nil { if c, ok := opts.IO.(StartCloser); ok { if err := c.CloseAfterStart(); err != nil { @@ -256,7 +248,7 @@ func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts } status, err := Monitor.Wait(cmd, ec) if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) + err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) } return err } @@ -264,6 +256,9 @@ func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts // Run runs the create, start, delete lifecycle of the container // and returns its exit status after it has exited func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) (int, error) { + if opts.Started != nil { + defer close(opts.Started) + } args := []string{"run", "--bundle", bundle} if opts != nil { oargs, err := opts.args() @@ -280,9 +275,12 @@ func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) if err != nil { return -1, err } + if opts.Started != nil { + opts.Started <- cmd.Process.Pid + } status, err := Monitor.Wait(cmd, ec) if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) + err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) } return status, err } @@ -403,7 +401,7 @@ func (r *Runc) Resume(context context.Context, id string) error { // Ps lists all the processes inside the container returning their pids func (r *Runc) Ps(context context.Context, id string) ([]int, error) { - data, err := cmdOutput(r.command(context, "ps", "--format", "json", id), true) + data, err := cmdOutput(r.command(context, "ps", "--format", "json", id), true, nil) defer putBuf(data) if err != nil { return nil, fmt.Errorf("%s: %s", err, data.String()) @@ -417,7 +415,7 @@ func (r *Runc) Ps(context context.Context, id string) ([]int, error) { // Top lists all the processes inside the container returning the full ps data func (r *Runc) Top(context context.Context, id string, psOptions string) (*TopResults, error) { - data, err := cmdOutput(r.command(context, "ps", "--format", "table", id, psOptions), true) + data, err := cmdOutput(r.command(context, "ps", "--format", "table", id, psOptions), true, nil) defer putBuf(data) if err != nil { return nil, fmt.Errorf("%s: %s", err, data.String()) @@ -452,6 +450,10 @@ type CheckpointOpts struct { // EmptyNamespaces creates a namespace for the container but does not save its properties // Provide the namespaces you wish to be checkpointed without their settings on restore EmptyNamespaces []string + // LazyPages uses userfaultfd to lazily restore memory pages + LazyPages bool + // StatusFile is the file criu writes \0 to once lazy-pages is ready + StatusFile *os.File } type CgroupMode string @@ -493,6 +495,9 @@ func (o *CheckpointOpts) args() (out []string) { for _, ns := range o.EmptyNamespaces { out = append(out, "--empty-ns", ns) } + if o.LazyPages { + out = append(out, "--lazy-pages") + } return out } @@ -511,13 +516,23 @@ func PreDump(args []string) []string { // Checkpoint allows you to 
checkpoint a container using criu func (r *Runc) Checkpoint(context context.Context, id string, opts *CheckpointOpts, actions ...CheckpointAction) error { args := []string{"checkpoint"} + extraFiles := []*os.File{} if opts != nil { args = append(args, opts.args()...) + if opts.StatusFile != nil { + // pass the status file to the child process + extraFiles = []*os.File{opts.StatusFile} + // set status-fd to 3 as this will be the file descriptor + // of the first file passed with cmd.ExtraFiles + args = append(args, "--status-fd", "3") + } } for _, a := range actions { args = a(args) } - return r.runOrError(r.command(context, append(args, id)...)) + cmd := r.command(context, append(args, id)...) + cmd.ExtraFiles = extraFiles + return r.runOrError(cmd) } type RestoreOpts struct { @@ -583,7 +598,7 @@ func (r *Runc) Restore(context context.Context, id, bundle string, opts *Restore } status, err := Monitor.Wait(cmd, ec) if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) + err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) } return status, err } @@ -612,7 +627,7 @@ type Version struct { // Version returns the runc and runtime-spec versions func (r *Runc) Version(context context.Context) (Version, error) { - data, err := cmdOutput(r.command(context, "--version"), false) + data, err := cmdOutput(r.command(context, "--version"), false, nil) defer putBuf(data) if err != nil { return Version{}, err @@ -680,11 +695,11 @@ func (r *Runc) runOrError(cmd *exec.Cmd) error { } status, err := Monitor.Wait(cmd, ec) if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) + err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) } return err } - data, err := cmdOutput(cmd, true) + data, err := cmdOutput(cmd, true, nil) defer putBuf(data) if err != nil { return fmt.Errorf("%s: %s", err, data.String()) @@ -694,7 +709,7 @@ func (r *Runc) runOrError(cmd *exec.Cmd) error { // callers of cmdOutput are expected to call putBuf on the returned Buffer // to ensure it is released back to the shared pool after use. -func cmdOutput(cmd *exec.Cmd, combined bool) (*bytes.Buffer, error) { +func cmdOutput(cmd *exec.Cmd, combined bool, started chan<- int) (*bytes.Buffer, error) { b := getBuf() cmd.Stdout = b @@ -705,11 +720,22 @@ func cmdOutput(cmd *exec.Cmd, combined bool) (*bytes.Buffer, error) { if err != nil { return nil, err } + if started != nil { + started <- cmd.Process.Pid + } status, err := Monitor.Wait(cmd, ec) if err == nil && status != 0 { - err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0]) + err = fmt.Errorf("%s did not terminate successfully: %w", cmd.Args[0], &ExitError{status}) } return b, err } + +type ExitError struct { + Status int +} + +func (e *ExitError) Error() string { + return fmt.Sprintf("exit status %d", e.Status) +} diff --git a/vendor/github.com/containerd/go-runc/runc_unix.go b/vendor/github.com/containerd/go-runc/runc_unix.go new file mode 100644 index 0000000000000000000000000000000000000000..548ffd6b90c64764858332c50ea3b1dff96e894a --- /dev/null +++ b/vendor/github.com/containerd/go-runc/runc_unix.go @@ -0,0 +1,38 @@ +//+build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
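The go-runc changes above thread a Started channel through CreateOpts and ExecOpts so callers learn the runc PID as soon as the process is spawned, and wrap non-zero exits in the new ExitError. A small usage sketch under those assumptions; the container ID and bundle path are illustrative only:

package runcdemo

import (
	"context"
	"errors"
	"log"

	"github.com/containerd/go-runc"
)

// runWithPid runs a container and reports the runc PID as soon as it is known.
func runWithPid(ctx context.Context, r *runc.Runc) (int, error) {
	started := make(chan int, 1)
	go func() {
		// Run sends the PID once and closes the channel on return.
		if pid, ok := <-started; ok {
			log.Printf("runc started with pid %d", pid)
		}
	}()

	status, err := r.Run(ctx, "demo-id", "/run/demo/bundle", &runc.CreateOpts{
		Started: started,
	})

	// Non-zero exits now wrap *runc.ExitError, so the status survives the error chain.
	var exitErr *runc.ExitError
	if errors.As(err, &exitErr) {
		log.Printf("runc exited with status %d", exitErr.Status)
	}
	return status, err
}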
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package runc + +import ( + "golang.org/x/sys/unix" +) + +// Runc is the client to the runc cli +type Runc struct { + //If command is empty, DefaultCommand is used + Command string + Root string + Debug bool + Log string + LogFormat Format + PdeathSignal unix.Signal + Setpgid bool + Criu string + SystemdCgroup bool + Rootless *bool // nil stands for "auto" +} diff --git a/vendor/github.com/containerd/go-runc/runc_windows.go b/vendor/github.com/containerd/go-runc/runc_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..c5873de8b6f7f1546ebac95745f5e1e2c3b74f64 --- /dev/null +++ b/vendor/github.com/containerd/go-runc/runc_windows.go @@ -0,0 +1,31 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package runc + +// Runc is the client to the runc cli +type Runc struct { + //If command is empty, DefaultCommand is used + Command string + Root string + Debug bool + Log string + LogFormat Format + Setpgid bool + Criu string + SystemdCgroup bool + Rootless *bool // nil stands for "auto" +} diff --git a/vendor/github.com/mitchellh/hashstructure/go.mod b/vendor/github.com/mitchellh/hashstructure/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..966582aa95b41a2d7c4dd7517175acc8e53c1ecf --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/hashstructure diff --git a/vendor/github.com/moby/buildkit/README.md b/vendor/github.com/moby/buildkit/README.md index 19d73253ae878261d5700aa2811c9dc0439bf9e2..0af9e2e1267c653cf60dfb8444bb19471c390755 100644 --- a/vendor/github.com/moby/buildkit/README.md +++ b/vendor/github.com/moby/buildkit/README.md @@ -62,6 +62,7 @@ You don't need to read this document unless you want to use the full-featured st - [Expose BuildKit as a TCP service](#expose-buildkit-as-a-tcp-service) - [Load balancing](#load-balancing) - [Containerizing BuildKit](#containerizing-buildkit) + - [Podman](#podman) - [Kubernetes](#kubernetes) - [Daemonless](#daemonless) - [Opentracing support](#opentracing-support) @@ -127,11 +128,6 @@ We are open to adding more backends. The buildkitd daemon listens gRPC API on `/run/buildkit/buildkitd.sock` by default, but you can also use TCP sockets. See [Expose BuildKit as a TCP service](#expose-buildkit-as-a-tcp-service). -:information_source: Notice to Fedora 31 users: - -* As runc still does not work on cgroup v2 environment like Fedora 31, you need to substitute runc with crun. Run `buildkitd` with `--oci-worker-binary=crun`. -* If you want to use runc, you need to configure the system to use cgroup v1. 
Run `sudo grubby --update-kernel=ALL --args="systemd.unified_cgroup_hierarchy=0"` and reboot. - ### Exploring LLB BuildKit builds are based on a binary intermediate format called LLB that is used for defining the dependency graph for processes running part of your build. tl;dr: LLB is to Dockerfile what LLVM IR is to C. @@ -150,6 +146,9 @@ Currently, the following high-level languages has been implemented for LLB: - [Mockerfile](https://matt-rickard.com/building-a-new-dockerfile-frontend/) - [Gockerfile](https://github.com/po3rin/gockerfile) - [bldr (Pkgfile)](https://github.com/talos-systems/bldr/) +- [HLB](https://github.com/openllb/hlb) +- [Earthfile (Earthly)](https://github.com/earthly/earthly) +- [Cargo Wharf (Rust)](https://github.com/denzp/cargo-wharf) - (open a PR to add your own language) ### Exploring Dockerfiles @@ -353,6 +352,7 @@ The directory layout conforms to OCI Image Spec v1.0. - `mode=max`: export all the layers of all intermediate steps. Not supported for `inline` cache exporter. - `ref=docker.io/user/image:tag`: reference for `registry` cache exporter - `dest=path/to/output-dir`: directory for `local` cache exporter +- `oci-mediatypes=true|false`: whether to use OCI mediatypes in exported manifests for `local` and `registry` exporter. Since BuildKit `v0.8` defaults to true. #### `--import-cache` options - `type`: `registry` or `local`. Use `registry` to import `inline` cache. @@ -418,6 +418,16 @@ export BUILDKIT_HOST=docker-container://buildkitd buildctl build --help ``` +### Podman +To connect to a BuildKit daemon running in a Podman container, use `podman-container://` instead of `docker-container://` . + +```bash +podman run -d --name buildkitd --privileged moby/buildkit:latest +buildctl --addr=podman-container://buildkitd build --frontend dockerfile.v0 --local context=. --local dockerfile=. --output type=oci | podman load foo +``` + +`sudo` is not required. + ### Kubernetes For Kubernetes deployments, see [`examples/kubernetes`](./examples/kubernetes). diff --git a/vendor/github.com/moby/buildkit/api/services/control/generate.go b/vendor/github.com/moby/buildkit/api/services/control/generate.go index 1c161155f502d508c825de7bc5994ea5d94b07c6..9a3b24613e17984a8a69135132ca6ff57fef4981 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/generate.go +++ b/vendor/github.com/moby/buildkit/api/services/control/generate.go @@ -1,3 +1,3 @@ -package moby_buildkit_v1 +package moby_buildkit_v1 //nolint:golint //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto diff --git a/vendor/github.com/moby/buildkit/api/types/generate.go b/vendor/github.com/moby/buildkit/api/types/generate.go index 84007df1d9b8b45b3b423c266003ac022fba0746..984bb74ce1ec4c2a19ca6baa3ab2f33db6eb46dc 100644 --- a/vendor/github.com/moby/buildkit/api/types/generate.go +++ b/vendor/github.com/moby/buildkit/api/types/generate.go @@ -1,3 +1,3 @@ -package moby_buildkit_v1_types +package moby_buildkit_v1_types //nolint:golint //go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=plugins=grpc:. 
worker.proto diff --git a/vendor/github.com/moby/buildkit/cache/blobs.go b/vendor/github.com/moby/buildkit/cache/blobs.go new file mode 100644 index 0000000000000000000000000000000000000000..d3648b143431b05118c6abceb2cb0c868e11854c --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/blobs.go @@ -0,0 +1,236 @@ +package cache + +import ( + "context" + + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/mount" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/winlayers" + digest "github.com/opencontainers/go-digest" + imagespecidentity "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +var g flightcontrol.Group + +const containerdUncompressed = "containerd.io/uncompressed" + +type CompareWithParent interface { + CompareWithParent(ctx context.Context, ref string, opts ...diff.Opt) (ocispec.Descriptor, error) +} + +var ErrNoBlobs = errors.Errorf("no blobs for snapshot") + +// computeBlobChain ensures every ref in a parent chain has an associated blob in the content store. If +// a blob is missing and createIfNeeded is true, then the blob will be created, otherwise ErrNoBlobs will +// be returned. Caller must hold a lease when calling this function. +func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded bool, compressionType compression.Type, s session.Group) error { + if _, ok := leases.FromContext(ctx); !ok { + return errors.Errorf("missing lease requirement for computeBlobChain") + } + + if err := sr.Finalize(ctx, true); err != nil { + return err + } + + if isTypeWindows(sr) { + ctx = winlayers.UseWindowsLayerMode(ctx) + } + + return computeBlobChain(ctx, sr, createIfNeeded, compressionType, s) +} + +func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, compressionType compression.Type, s session.Group) error { + baseCtx := ctx + eg, ctx := errgroup.WithContext(ctx) + var currentDescr ocispec.Descriptor + if sr.parent != nil { + eg.Go(func() error { + return computeBlobChain(ctx, sr.parent, createIfNeeded, compressionType, s) + }) + } + eg.Go(func() error { + dp, err := g.Do(ctx, sr.ID(), func(ctx context.Context) (interface{}, error) { + refInfo := sr.Info() + if refInfo.Blob != "" { + return nil, nil + } else if !createIfNeeded { + return nil, errors.WithStack(ErrNoBlobs) + } + + var mediaType string + switch compressionType { + case compression.Uncompressed: + mediaType = ocispec.MediaTypeImageLayer + case compression.Gzip: + mediaType = ocispec.MediaTypeImageLayerGzip + default: + return nil, errors.Errorf("unknown layer compression type: %q", compressionType) + } + + var descr ocispec.Descriptor + var err error + + if pc, ok := sr.cm.Differ.(CompareWithParent); ok { + descr, err = pc.CompareWithParent(ctx, sr.ID(), diff.WithMediaType(mediaType)) + if err != nil { + return nil, err + } + } + if descr.Digest == "" { + // reference needs to be committed + var lower []mount.Mount + if sr.parent != nil { + m, err := sr.parent.Mount(ctx, true, s) + if err != nil { + return nil, err + } + var release func() error + lower, release, err = m.Mount() + if err != nil { + return nil, err + } + if release != nil { + defer release() + } + } + m, err := sr.Mount(ctx, true, s) + if err != nil { + return nil, err + } + upper, release, err := 
m.Mount() + if err != nil { + return nil, err + } + if release != nil { + defer release() + } + descr, err = sr.cm.Differ.Compare(ctx, lower, upper, + diff.WithMediaType(mediaType), + diff.WithReference(sr.ID()), + ) + if err != nil { + return nil, err + } + } + + if descr.Annotations == nil { + descr.Annotations = map[string]string{} + } + + info, err := sr.cm.ContentStore.Info(ctx, descr.Digest) + if err != nil { + return nil, err + } + + if diffID, ok := info.Labels[containerdUncompressed]; ok { + descr.Annotations[containerdUncompressed] = diffID + } else if compressionType == compression.Uncompressed { + descr.Annotations[containerdUncompressed] = descr.Digest.String() + } else { + return nil, errors.Errorf("unknown layer compression type") + } + + return descr, nil + + }) + if err != nil { + return err + } + + if dp != nil { + currentDescr = dp.(ocispec.Descriptor) + } + return nil + }) + err := eg.Wait() + if err != nil { + return err + } + if currentDescr.Digest != "" { + if err := sr.setBlob(baseCtx, currentDescr); err != nil { + return err + } + } + return nil +} + +// setBlob associates a blob with the cache record. +// A lease must be held for the blob when calling this function +// Caller should call Info() for knowing what current values are actually set +func (sr *immutableRef) setBlob(ctx context.Context, desc ocispec.Descriptor) error { + if _, ok := leases.FromContext(ctx); !ok { + return errors.Errorf("missing lease requirement for setBlob") + } + + diffID, err := diffIDFromDescriptor(desc) + if err != nil { + return err + } + if _, err := sr.cm.ContentStore.Info(ctx, desc.Digest); err != nil { + return err + } + + sr.mu.Lock() + defer sr.mu.Unlock() + + if getChainID(sr.md) != "" { + return nil + } + + if err := sr.finalize(ctx, true); err != nil { + return err + } + + p := sr.parent + var parentChainID digest.Digest + var parentBlobChainID digest.Digest + if p != nil { + pInfo := p.Info() + if pInfo.ChainID == "" || pInfo.BlobChainID == "" { + return errors.Errorf("failed to set blob for reference with non-addressable parent") + } + parentChainID = pInfo.ChainID + parentBlobChainID = pInfo.BlobChainID + } + + if err := sr.cm.LeaseManager.AddResource(ctx, leases.Lease{ID: sr.ID()}, leases.Resource{ + ID: desc.Digest.String(), + Type: "content", + }); err != nil { + return err + } + + queueDiffID(sr.md, diffID.String()) + queueBlob(sr.md, desc.Digest.String()) + chainID := diffID + blobChainID := imagespecidentity.ChainID([]digest.Digest{desc.Digest, diffID}) + if parentChainID != "" { + chainID = imagespecidentity.ChainID([]digest.Digest{parentChainID, chainID}) + blobChainID = imagespecidentity.ChainID([]digest.Digest{parentBlobChainID, blobChainID}) + } + queueChainID(sr.md, chainID.String()) + queueBlobChainID(sr.md, blobChainID.String()) + queueMediaType(sr.md, desc.MediaType) + queueBlobSize(sr.md, desc.Size) + if err := sr.md.Commit(); err != nil { + return err + } + return nil +} + +func isTypeWindows(sr *immutableRef) bool { + if GetLayerType(sr) == "windows" { + return true + } + if parent := sr.parent; parent != nil { + return isTypeWindows(parent) + } + return false +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go index 5d42d0417ee9dd55dd7fa542ef68e01554e3ec03..a8335eaa9c0adac2c7d1c06e5ecd6c9ae234d7db 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go @@ -11,12 +11,13 @@ import 
( "sync" "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/locker" iradix "github.com/hashicorp/go-immutable-radix" "github.com/hashicorp/golang-lru/simplelru" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" + "github.com/moby/locker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/tonistiigi/fsutil" @@ -44,12 +45,12 @@ func getDefaultManager() *cacheManager { // header, "/dir" is for contents. For the root node "" (empty string) is the // key for root, "/" for the root header -func Checksum(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool) (digest.Digest, error) { - return getDefaultManager().Checksum(ctx, ref, path, followLinks) +func Checksum(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool, s session.Group) (digest.Digest, error) { + return getDefaultManager().Checksum(ctx, ref, path, followLinks, s) } -func ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool) (digest.Digest, error) { - return getDefaultManager().ChecksumWildcard(ctx, ref, path, followLinks) +func ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool, s session.Group) (digest.Digest, error) { + return getDefaultManager().ChecksumWildcard(ctx, ref, path, followLinks, s) } func GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) { @@ -65,8 +66,8 @@ func ClearCacheContext(md *metadata.StorageItem) { } type CacheContext interface { - Checksum(ctx context.Context, ref cache.Mountable, p string, followLinks bool) (digest.Digest, error) - ChecksumWildcard(ctx context.Context, ref cache.Mountable, p string, followLinks bool) (digest.Digest, error) + Checksum(ctx context.Context, ref cache.Mountable, p string, followLinks bool, s session.Group) (digest.Digest, error) + ChecksumWildcard(ctx context.Context, ref cache.Mountable, p string, followLinks bool, s session.Group) (digest.Digest, error) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error } @@ -85,20 +86,20 @@ type cacheManager struct { lruMu sync.Mutex } -func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) { +func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool, s session.Group) (digest.Digest, error) { cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping()) if err != nil { return "", nil } - return cc.Checksum(ctx, ref, p, followLinks) + return cc.Checksum(ctx, ref, p, followLinks, s) } -func (cm *cacheManager) ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) { +func (cm *cacheManager) ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool, s session.Group) (digest.Digest, error) { cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping()) if err != nil { return "", nil } - return cc.ChecksumWildcard(ctx, ref, p, followLinks) + return cc.ChecksumWildcard(ctx, ref, p, followLinks, s) } func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) { @@ -170,13 +171,14 @@ type mount struct { mountable cache.Mountable mountPath 
string unmount func() error + session session.Group } func (m *mount) mount(ctx context.Context) (string, error) { if m.mountPath != "" { return m.mountPath, nil } - mounts, err := m.mountable.Mount(ctx, true) + mounts, err := m.mountable.Mount(ctx, true, m.session) if err != nil { return "", err } @@ -380,13 +382,13 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil return nil } -func (cc *cacheContext) ChecksumWildcard(ctx context.Context, mountable cache.Mountable, p string, followLinks bool) (digest.Digest, error) { - m := &mount{mountable: mountable} +func (cc *cacheContext) ChecksumWildcard(ctx context.Context, mountable cache.Mountable, p string, followLinks bool, s session.Group) (digest.Digest, error) { + m := &mount{mountable: mountable, session: s} defer m.clean() wildcards, err := cc.wildcards(ctx, m, p) if err != nil { - return "", nil + return "", err } if followLinks { @@ -413,13 +415,12 @@ func (cc *cacheContext) ChecksumWildcard(ctx context.Context, mountable cache.Mo digester.Hash().Write([]byte(w.Record.Digest)) } return digester.Digest(), nil - } else { - return wildcards[0].Record.Digest, nil } + return wildcards[0].Record.Digest, nil } -func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string, followLinks bool) (digest.Digest, error) { - m := &mount{mountable: mountable} +func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string, followLinks bool, s session.Group) (digest.Digest, error) { + m := &mount{mountable: mountable, session: s} defer m.clean() return cc.checksumFollow(ctx, m, p, followLinks) @@ -688,24 +689,24 @@ func (cc *cacheContext) needsScanFollow(root *iradix.Node, p string, linksWalked if p == "/" { p = "" } - if v, ok := root.Get(convertPathToKey([]byte(p))); !ok { + v, ok := root.Get(convertPathToKey([]byte(p))) + if !ok { if p == "" { return true, nil } return cc.needsScanFollow(root, path.Clean(path.Dir(p)), linksWalked) - } else { - cr := v.(*CacheRecord) - if cr.Type == CacheRecordTypeSymlink { - if *linksWalked > 255 { - return false, errTooManyLinks - } - *linksWalked++ - link := path.Clean(cr.Linkname) - if !path.IsAbs(cr.Linkname) { - link = path.Join("/", path.Dir(p), link) - } - return cc.needsScanFollow(root, link, linksWalked) + } + cr := v.(*CacheRecord) + if cr.Type == CacheRecordTypeSymlink { + if *linksWalked > 255 { + return false, errTooManyLinks + } + *linksWalked++ + link := path.Clean(cr.Linkname) + if !path.IsAbs(cr.Linkname) { + link = path.Join("/", path.Dir(p), link) } + return cc.needsScanFollow(root, link, linksWalked) } return false, nil } @@ -875,12 +876,15 @@ func ensureOriginMetadata(md *metadata.StorageItem) *metadata.StorageItem { } var pool32K = sync.Pool{ - New: func() interface{} { return make([]byte, 32*1024) }, // 32K + New: func() interface{} { + buf := make([]byte, 32*1024) // 32K + return &buf + }, } func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := pool32K.Get().([]byte) - written, err = io.CopyBuffer(dst, src, buf) + buf := pool32K.Get().(*[]byte) + written, err = io.CopyBuffer(dst, src, *buf) pool32K.Put(buf) return } diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go index 84018e7852262845abdf6a266acb13ce782c7a80..0b5267101b03cf5c3f31be4fb5b4854637862813 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go +++ 
b/vendor/github.com/moby/buildkit/cache/contenthash/filehash.go @@ -40,20 +40,22 @@ func NewFileHash(path string, fi os.FileInfo) (hash.Hash, error) { } func NewFromStat(stat *fstypes.Stat) (hash.Hash, error) { + // Clear the socket bit since archive/tar.FileInfoHeader does not handle it + stat.Mode &^= uint32(os.ModeSocket) + fi := &statInfo{stat} hdr, err := tar.FileInfoHeader(fi, stat.Linkname) if err != nil { return nil, err } hdr.Name = "" // note: empty name is different from current has in docker build. Name is added on recursive directory scan instead - hdr.Mode = int64(chmodWindowsTarEntry(os.FileMode(hdr.Mode))) hdr.Devmajor = stat.Devmajor hdr.Devminor = stat.Devminor if len(stat.Xattrs) > 0 { - hdr.Xattrs = make(map[string]string, len(stat.Xattrs)) + hdr.PAXRecords = make(map[string]string, len(stat.Xattrs)) for k, v := range stat.Xattrs { - hdr.Xattrs[k] = string(v) + hdr.PAXRecords["SCHILY.xattr."+k] = string(v) } } // fmt.Printf("hdr: %#v\n", hdr) diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go index 4f610d772db6a08c31679b83bb869700132ca782..ccd2ebd2bbf54dfa7348a52e552368025b8eea41 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go @@ -12,10 +12,6 @@ import ( "golang.org/x/sys/unix" ) -func chmodWindowsTarEntry(perm os.FileMode) os.FileMode { - return perm -} - func setUnixOpt(path string, fi os.FileInfo, stat *fstypes.Stat) error { s := fi.Sys().(*syscall.Stat_t) diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go index e15bf1e5abd8a808ef6c2252b72e0e2c9d4df8cf..c6bfce9e61bee05e75d5bd06ca1617b354fe438d 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go @@ -8,16 +8,6 @@ import ( fstypes "github.com/tonistiigi/fsutil/types" ) -// chmodWindowsTarEntry is used to adjust the file permissions used in tar -// header based on the platform the archival is done. -func chmodWindowsTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 - // Add the x bit: make everything +x from windows - perm |= 0111 - - return perm -} - func setUnixOpt(path string, fi os.FileInfo, stat *fstypes.Stat) error { return nil } diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go index de72d6cdd0abf52d0ed1a72c8126d102dab3acf1..a7f192d121ee05bc99d37f1877fcddd9c702782e 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go @@ -36,11 +36,24 @@ func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { } func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + pax := h.PAXRecords + if len(h.Xattrs) > 0 { //nolint deprecated + if pax == nil { + pax = map[string]string{} + for k, v := range h.Xattrs { //nolint deprecated + pax["SCHILY.xattr."+k] = v + } + } + } + // Get extended attributes. 
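The NewFromStat and v1TarHeaderSelect hunks here move extended attributes from the deprecated tar.Header.Xattrs map to PAXRecords keyed with the "SCHILY.xattr." prefix, which is the form archive/tar actually serializes. A minimal sketch of that convention; the file name and attribute values are illustrative:

package tardemo

import "archive/tar"

// headerWithXattr builds a tar header carrying xattrs as PAX records,
// mirroring the convention the hunks above switch to.
func headerWithXattr(name string, xattrs map[string]string) *tar.Header {
	hdr := &tar.Header{Name: name, PAXRecords: map[string]string{}}
	for k, v := range xattrs {
		// e.g. "security.capability" becomes "SCHILY.xattr.security.capability"
		hdr.PAXRecords["SCHILY.xattr."+k] = v
	}
	return hdr
}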
- xAttrKeys := make([]string, len(h.Xattrs)) - for k := range h.Xattrs { - if k == "security.capability" || !strings.HasPrefix(k, "security.") && !strings.HasPrefix(k, "system.") { - xAttrKeys = append(xAttrKeys, k) + xAttrKeys := make([]string, len(h.PAXRecords)) + for k := range pax { + if strings.HasPrefix(k, "SCHILY.xattr.") { + k = strings.TrimPrefix(k, "SCHILY.xattr.") + if k == "security.capability" || !strings.HasPrefix(k, "security.") && !strings.HasPrefix(k, "system.") { + xAttrKeys = append(xAttrKeys, k) + } } } sort.Strings(xAttrKeys) @@ -56,7 +69,7 @@ func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { // Finally, append the sorted xattrs. for _, k := range xAttrKeys { - orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + orderedHeaders = append(orderedHeaders, [2]string{k, h.PAXRecords["SCHILY.xattr."+k]}) } return diff --git a/vendor/github.com/moby/buildkit/cache/manager.go b/vendor/github.com/moby/buildkit/cache/manager.go index 3d1e8843607db46a376a4ca0e99a7a7ab976ea55..0fb0103e5088e769b4d64555e0a5f4902250ccb2 100644 --- a/vendor/github.com/moby/buildkit/cache/manager.go +++ b/vendor/github.com/moby/buildkit/cache/manager.go @@ -8,6 +8,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/filters" "github.com/containerd/containerd/gc" "github.com/containerd/containerd/leases" @@ -15,7 +16,9 @@ import ( "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/client" "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/flightcontrol" digest "github.com/opencontainers/go-digest" imagespecidentity "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -38,14 +41,15 @@ type ManagerOpt struct { PruneRefChecker ExternalRefCheckerFunc GarbageCollect func(ctx context.Context) (gc.Stats, error) Applier diff.Applier + Differ diff.Comparer } type Accessor interface { GetByBlob(ctx context.Context, desc ocispec.Descriptor, parent ImmutableRef, opts ...RefOption) (ImmutableRef, error) Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) - New(ctx context.Context, parent ImmutableRef, opts ...RefOption) (MutableRef, error) - GetMutable(ctx context.Context, id string) (MutableRef, error) // Rebase? + New(ctx context.Context, parent ImmutableRef, s session.Group, opts ...RefOption) (MutableRef, error) + GetMutable(ctx context.Context, id string, opts ...RefOption) (MutableRef, error) // Rebase? 
IdentityMapping() *idtools.IdentityMapping Metadata(string) *metadata.StorageItem } @@ -74,6 +78,7 @@ type cacheManager struct { md *metadata.Store muPrune sync.Mutex // make sure parallel prune is not allowed so there will not be inconsistent results + unlazyG flightcontrol.Group } func NewManager(opt ManagerOpt) (Manager, error) { @@ -92,7 +97,7 @@ func NewManager(opt ManagerOpt) (Manager, error) { return cm, nil } -func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, parent ImmutableRef, opts ...RefOption) (ir ImmutableRef, err error) { +func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, parent ImmutableRef, opts ...RefOption) (ir ImmutableRef, rerr error) { diffID, err := diffIDFromDescriptor(desc) if err != nil { return nil, err @@ -100,9 +105,12 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, chainID := diffID blobChainID := imagespecidentity.ChainID([]digest.Digest{desc.Digest, diffID}) - if desc.Digest != "" { - if _, err := cm.ContentStore.Info(ctx, desc.Digest); err != nil { - return nil, errors.Wrapf(err, "failed to get blob %s", desc.Digest) + descHandlers := descHandlersOf(opts...) + if desc.Digest != "" && (descHandlers == nil || descHandlers[desc.Digest] == nil) { + if _, err := cm.ContentStore.Info(ctx, desc.Digest); errors.Is(err, errdefs.ErrNotFound) { + return nil, NeedsRemoteProvidersError([]digest.Digest{desc.Digest}) + } else if err != nil { + return nil, err } } @@ -115,7 +123,8 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, } chainID = imagespecidentity.ChainID([]digest.Digest{pInfo.ChainID, chainID}) blobChainID = imagespecidentity.ChainID([]digest.Digest{pInfo.BlobChainID, blobChainID}) - p2, err := cm.Get(ctx, parent.ID(), NoUpdateLastUsed) + + p2, err := cm.Get(ctx, parent.ID(), NoUpdateLastUsed, descHandlers) if err != nil { return nil, err } @@ -128,7 +137,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, releaseParent := false defer func() { - if releaseParent || err != nil && p != nil { + if releaseParent || rerr != nil && p != nil { p.Release(context.TODO()) } }() @@ -141,14 +150,17 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, return nil, err } - for _, si := range sis { - ref, err := cm.get(ctx, si.ID(), opts...) + if len(sis) > 0 { + ref, err := cm.get(ctx, sis[0].ID(), opts...) if err != nil && !IsNotFound(err) { - return nil, errors.Wrapf(err, "failed to get record %s by blobchainid", si.ID()) + return nil, errors.Wrapf(err, "failed to get record %s by blobchainid", sis[0].ID()) } if p != nil { releaseParent = true } + if err := setImageRefMetadata(ref, opts...); err != nil { + return nil, errors.Wrapf(err, "failed to append image ref metadata to ref %s", ref.ID()) + } return ref, nil } @@ -158,13 +170,12 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, } var link ImmutableRef - for _, si := range sis { - ref, err := cm.get(ctx, si.ID(), opts...) + if len(sis) > 0 { + ref, err := cm.get(ctx, sis[0].ID(), opts...) 
if err != nil && !IsNotFound(err) { - return nil, errors.Wrapf(err, "failed to get record %s by chainid", si.ID()) + return nil, errors.Wrapf(err, "failed to get record %s by chainid", sis[0].ID()) } link = ref - break } id := identity.NewID() @@ -188,7 +199,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, } defer func() { - if err != nil { + if rerr != nil { if err := cm.ManagerOpt.LeaseManager.Delete(context.TODO(), leases.Lease{ ID: l.ID, }); err != nil { @@ -227,6 +238,10 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, return nil, err } + if err := setImageRefMetadata(rec, opts...); err != nil { + return nil, errors.Wrapf(err, "failed to append image ref metadata to ref %s", rec.ID()) + } + queueDiffID(rec.md, diffID.String()) queueBlob(rec.md, desc.Digest.String()) queueChainID(rec.md, chainID.String()) @@ -234,6 +249,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, queueSnapshotID(rec.md, snapshotID) queueBlobOnly(rec.md, blobOnly) queueMediaType(rec.md, desc.MediaType) + queueBlobSize(rec.md, desc.Size) queueCommitted(rec.md) if err := rec.md.Commit(); err != nil { @@ -242,7 +258,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, cm.records[id] = rec - return rec.ref(true), nil + return rec.ref(true, descHandlers), nil } // init loads all snapshots from metadata state and tries to load the records @@ -308,25 +324,52 @@ func (cm *cacheManager) get(ctx context.Context, id string, opts ...RefOption) ( } } + descHandlers := descHandlersOf(opts...) + if rec.mutable { if len(rec.refs) != 0 { return nil, errors.Wrapf(ErrLocked, "%s is locked", id) } if rec.equalImmutable != nil { - return rec.equalImmutable.ref(triggerUpdate), nil + return rec.equalImmutable.ref(triggerUpdate, descHandlers), nil } - return rec.mref(triggerUpdate).commit(ctx) + return rec.mref(triggerUpdate, descHandlers).commit(ctx) } - return rec.ref(triggerUpdate), nil + return rec.ref(triggerUpdate, descHandlers), nil } // getRecord returns record for id. Requires manager lock. func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOption) (cr *cacheRecord, retErr error) { + checkLazyProviders := func(rec *cacheRecord) error { + missing := NeedsRemoteProvidersError(nil) + dhs := descHandlersOf(opts...) + for { + blob := digest.Digest(getBlob(rec.md)) + if isLazy, err := rec.isLazy(ctx); err != nil { + return err + } else if isLazy && dhs[blob] == nil { + missing = append(missing, blob) + } + + if rec.parent == nil { + break + } + rec = rec.parent.cacheRecord + } + if len(missing) > 0 { + return missing + } + return nil + } + if rec, ok := cm.records[id]; ok { if rec.isDead() { return nil, errors.Wrapf(errNotFound, "failed to get dead record %s", id) } + if err := checkLazyProviders(rec); err != nil { + return nil, err + } return rec, nil } @@ -343,11 +386,17 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOpt } return nil, err } + + // parent refs are possibly lazy so keep it hold the description handlers. 
+ var dhs DescHandlers + if mutable.parent != nil { + dhs = mutable.parent.descHandlers + } rec := &cacheRecord{ mu: &sync.Mutex{}, cm: cm, refs: make(map[ref]struct{}), - parent: mutable.parentRef(false), + parent: mutable.parentRef(false, dhs), md: md, equalMutable: &mutableRef{cacheRecord: mutable}, } @@ -393,25 +442,39 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOpt return nil, err } + if err := setImageRefMetadata(rec, opts...); err != nil { + return nil, errors.Wrapf(err, "failed to append image ref metadata to ref %s", rec.ID()) + } + cm.records[id] = rec + if err := checkLazyProviders(rec); err != nil { + return nil, err + } return rec, nil } -func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, opts ...RefOption) (mr MutableRef, err error) { +func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, sess session.Group, opts ...RefOption) (mr MutableRef, err error) { id := identity.NewID() var parent *immutableRef var parentID string var parentSnapshotID string if s != nil { - p, err := cm.Get(ctx, s.ID(), NoUpdateLastUsed) - if err != nil { + if _, ok := s.(*immutableRef); ok { + parent = s.Clone().(*immutableRef) + } else { + p, err := cm.Get(ctx, s.ID(), append(opts, NoUpdateLastUsed)...) + if err != nil { + return nil, err + } + parent = p.(*immutableRef) + } + if err := parent.Finalize(ctx, true); err != nil { return nil, err } - if err := p.Finalize(ctx, true); err != nil { + if err := parent.Extract(ctx, sess); err != nil { return nil, err } - parent = p.(*immutableRef) parentSnapshotID = getSnapshotID(parent.md) parentID = parent.ID() } @@ -469,18 +532,28 @@ func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, opts ...RefOpti return nil, err } + if err := setImageRefMetadata(rec, opts...); err != nil { + return nil, errors.Wrapf(err, "failed to append image ref metadata to ref %s", rec.ID()) + } + cm.mu.Lock() defer cm.mu.Unlock() cm.records[id] = rec // TODO: save to db - return rec.mref(true), nil + // parent refs are possibly lazy so keep it hold the description handlers. + var dhs DescHandlers + if parent != nil { + dhs = parent.descHandlers + } + return rec.mref(true, dhs), nil } -func (cm *cacheManager) GetMutable(ctx context.Context, id string) (MutableRef, error) { + +func (cm *cacheManager) GetMutable(ctx context.Context, id string, opts ...RefOption) (MutableRef, error) { cm.mu.Lock() defer cm.mu.Unlock() - rec, err := cm.getRecord(ctx, id) + rec, err := cm.getRecord(ctx, id, opts...) if err != nil { return nil, err } @@ -506,7 +579,7 @@ func (cm *cacheManager) GetMutable(ctx context.Context, id string) (MutableRef, rec.equalImmutable = nil } - return rec.mref(true), nil + return rec.mref(true, descHandlersOf(opts...)), nil } func (cm *cacheManager) Prune(ctx context.Context, ch chan client.UsageInfo, opts ...client.PruneInfo) error { @@ -957,6 +1030,31 @@ func WithCreationTime(tm time.Time) RefOption { } } +// Need a separate type for imageRef because it needs to be called outside +// initializeMetadata while still being a RefOption, so wrapping it in a +// different type ensures initializeMetadata won't catch it too and duplicate +// setting the metadata. 
+type imageRefOption func(m withMetadata) error + +// WithImageRef appends the given imageRef to the cache ref's metadata +func WithImageRef(imageRef string) RefOption { + return imageRefOption(func(m withMetadata) error { + return appendImageRef(m.Metadata(), imageRef) + }) +} + +func setImageRefMetadata(m withMetadata, opts ...RefOption) error { + md := m.Metadata() + for _, opt := range opts { + if fn, ok := opt.(imageRefOption); ok { + if err := fn(m); err != nil { + return err + } + } + } + return md.Commit() +} + func initializeMetadata(m withMetadata, parent string, opts ...RefOption) error { md := m.Metadata() if tm := GetCreatedAt(md); !tm.IsZero() { diff --git a/vendor/github.com/moby/buildkit/cache/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata.go index bf4041e098a39e179690147cda2308501343d402..a5c7df7f4cf033cbc1fcb73f2a883dd3214f8fb4 100644 --- a/vendor/github.com/moby/buildkit/cache/metadata.go +++ b/vendor/github.com/moby/buildkit/cache/metadata.go @@ -28,6 +28,10 @@ const keyBlob = "cache.blob" const keySnapshot = "cache.snapshot" const keyBlobOnly = "cache.blobonly" const keyMediaType = "cache.mediatype" +const keyImageRefs = "cache.imageRefs" + +// BlobSize is the packed blob size as specified in the oci descriptor +const keyBlobSize = "cache.blobsize" const keyDeleted = "cache.deleted" @@ -307,6 +311,63 @@ func getSize(si *metadata.StorageItem) int64 { return size } +func appendImageRef(si *metadata.StorageItem, s string) error { + return si.GetAndSetValue(keyImageRefs, func(v *metadata.Value) (*metadata.Value, error) { + var imageRefs []string + if v != nil { + if err := v.Unmarshal(&imageRefs); err != nil { + return nil, err + } + } + for _, existing := range imageRefs { + if existing == s { + return nil, metadata.ErrSkipSetValue + } + } + imageRefs = append(imageRefs, s) + v, err := metadata.NewValue(imageRefs) + if err != nil { + return nil, errors.Wrap(err, "failed to create imageRefs value") + } + return v, nil + }) +} + +func getImageRefs(si *metadata.StorageItem) []string { + v := si.Get(keyImageRefs) + if v == nil { + return nil + } + var refs []string + if err := v.Unmarshal(&refs); err != nil { + return nil + } + return refs +} + +func queueBlobSize(si *metadata.StorageItem, s int64) error { + v, err := metadata.NewValue(s) + if err != nil { + return errors.Wrap(err, "failed to create blobsize value") + } + si.Queue(func(b *bolt.Bucket) error { + return si.SetValue(b, keyBlobSize, v) + }) + return nil +} + +func getBlobSize(si *metadata.StorageItem) int64 { + v := si.Get(keyBlobSize) + if v == nil { + return sizeUnknown + } + var size int64 + if err := v.Unmarshal(&size); err != nil { + return sizeUnknown + } + return size +} + func getEqualMutable(si *metadata.StorageItem) string { v := si.Get(keyEqualMutable) if v == nil { diff --git a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go index 42e8cb4011df8d886fe1f9f6a14c014449229cbc..b0b22212b54a8fa7bb14579e12f5fc3255840b19 100644 --- a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go +++ b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go @@ -372,6 +372,22 @@ func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error { return nil } +var ErrSkipSetValue = errors.New("skip setting metadata value") + +func (s *StorageItem) GetAndSetValue(key string, fn func(*Value) (*Value, error)) error { + s.mu.Lock() + defer s.mu.Unlock() + return s.Update(func(b *bolt.Bucket) error { + v, err := 
fn(s.values[key]) + if errors.Is(err, ErrSkipSetValue) { + return nil + } else if err != nil { + return err + } + return s.SetValue(b, key, v) + }) +} + type Value struct { Value json.RawMessage `json:"value,omitempty"` Index string `json:"index,omitempty"` diff --git a/vendor/github.com/moby/buildkit/cache/opts.go b/vendor/github.com/moby/buildkit/cache/opts.go new file mode 100644 index 0000000000000000000000000000000000000000..911def3e1f385886f485a24361114de45e354365 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/opts.go @@ -0,0 +1,35 @@ +package cache + +import ( + "fmt" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/progress" + digest "github.com/opencontainers/go-digest" +) + +type DescHandler struct { + Provider func(session.Group) content.Provider + Progress progress.Controller + SnapshotLabels map[string]string +} + +type DescHandlers map[digest.Digest]*DescHandler + +func descHandlersOf(opts ...RefOption) DescHandlers { + for _, opt := range opts { + if opt, ok := opt.(DescHandlers); ok { + return opt + } + } + return nil +} + +type DescHandlerKey digest.Digest + +type NeedsRemoteProvidersError []digest.Digest + +func (m NeedsRemoteProvidersError) Error() string { + return fmt.Sprintf("missing descriptor handlers for lazy blobs %+v", []digest.Digest(m)) +} diff --git a/vendor/github.com/moby/buildkit/cache/refs.go b/vendor/github.com/moby/buildkit/cache/refs.go index d932f4e96c045a19f2d960aebbc5101fa6d11b68..2b25235e44ca3b1f31ef60d5ef0e60bda0f4bf87 100644 --- a/vendor/github.com/moby/buildkit/cache/refs.go +++ b/vendor/github.com/moby/buildkit/cache/refs.go @@ -14,14 +14,18 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/winlayers" digest "github.com/opencontainers/go-digest" - imagespecidentity "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" ) // Ref is a reference to cacheable objects. 
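The new cache/opts.go above introduces DescHandlers, per-digest hooks that tell the cache how to fetch a lazy blob, and NeedsRemoteProvidersError, which lookups return when a blob is missing locally and no handler was supplied. A rough sketch of the resulting retry pattern, assuming a hypothetical provider callback; the real wiring lives elsewhere in BuildKit:

package cachedemo

import (
	"context"
	"errors"

	"github.com/containerd/containerd/content"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/session"
)

// getWithHandlers retries a cache lookup with DescHandlers when the first
// attempt reports lazy blobs with no local content. provider is a
// hypothetical callback returning a content.Provider for a session group.
func getWithHandlers(ctx context.Context, cm cache.Accessor, id string,
	provider func(session.Group) content.Provider) (cache.ImmutableRef, error) {

	ref, err := cm.Get(ctx, id)
	var missing cache.NeedsRemoteProvidersError
	if errors.As(err, &missing) {
		dhs := cache.DescHandlers{}
		for _, dgst := range missing {
			dhs[dgst] = &cache.DescHandler{Provider: provider}
		}
		// DescHandlers is matched by type in descHandlersOf, so it can be
		// passed directly as a RefOption, as the patch itself does.
		return cm.Get(ctx, id, dhs)
	}
	return ref, err
}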
@@ -41,8 +45,8 @@ type ImmutableRef interface { Clone() ImmutableRef Info() RefInfo - SetBlob(ctx context.Context, desc ocispec.Descriptor) error - Extract(ctx context.Context) error // +progress + Extract(ctx context.Context, s session.Group) error // +progress + GetRemote(ctx context.Context, createIfNeeded bool, compressionType compression.Type, s session.Group) (*solver.Remote, error) } type RefInfo struct { @@ -61,7 +65,7 @@ type MutableRef interface { } type Mountable interface { - Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) + Mount(ctx context.Context, readonly bool, s session.Group) (snapshot.Mountable, error) } type ref interface { @@ -93,15 +97,23 @@ type cacheRecord struct { } // hold ref lock before calling -func (cr *cacheRecord) ref(triggerLastUsed bool) *immutableRef { - ref := &immutableRef{cacheRecord: cr, triggerLastUsed: triggerLastUsed} +func (cr *cacheRecord) ref(triggerLastUsed bool, descHandlers DescHandlers) *immutableRef { + ref := &immutableRef{ + cacheRecord: cr, + triggerLastUsed: triggerLastUsed, + descHandlers: descHandlers, + } cr.refs[ref] = struct{}{} return ref } // hold ref lock before calling -func (cr *cacheRecord) mref(triggerLastUsed bool) *mutableRef { - ref := &mutableRef{cacheRecord: cr, triggerLastUsed: triggerLastUsed} +func (cr *cacheRecord) mref(triggerLastUsed bool, descHandlers DescHandlers) *mutableRef { + ref := &mutableRef{ + cacheRecord: cr, + triggerLastUsed: triggerLastUsed, + descHandlers: descHandlers, + } cr.refs[ref] = struct{}{} return ref } @@ -131,6 +143,22 @@ func (cr *cacheRecord) isDead() bool { return cr.dead || (cr.equalImmutable != nil && cr.equalImmutable.dead) || (cr.equalMutable != nil && cr.equalMutable.dead) } +func (cr *cacheRecord) isLazy(ctx context.Context) (bool, error) { + if !getBlobOnly(cr.md) { + return false, nil + } + dgst := getBlob(cr.md) + // special case for moby where there is no compressed blob (empty digest) + if dgst == "" { + return false, nil + } + _, err := cr.cm.ContentStore.Info(ctx, digest.Digest(dgst)) + if errors.Is(err, errdefs.ErrNotFound) { + return true, nil + } + return false, err +} + func (cr *cacheRecord) IdentityMapping() *idtools.IdentityMapping { return cr.cm.IdentityMapping() } @@ -186,27 +214,18 @@ func (cr *cacheRecord) Size(ctx context.Context) (int64, error) { return s.(int64), nil } -func (cr *cacheRecord) Parent() ImmutableRef { - if p := cr.parentRef(true); p != nil { // avoid returning typed nil pointer - return p - } - return nil -} - -func (cr *cacheRecord) parentRef(hidden bool) *immutableRef { +func (cr *cacheRecord) parentRef(hidden bool, descHandlers DescHandlers) *immutableRef { p := cr.parent if p == nil { return nil } p.mu.Lock() defer p.mu.Unlock() - return p.ref(hidden) + return p.ref(hidden, descHandlers) } -func (cr *cacheRecord) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { - cr.mu.Lock() - defer cr.mu.Unlock() - +// must be called holding cacheRecord mu +func (cr *cacheRecord) mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { if cr.mutable { m, err := cr.cm.Snapshotter.Mounts(ctx, getSnapshotID(cr.md)) if err != nil { @@ -282,20 +301,29 @@ func (cr *cacheRecord) ID() string { type immutableRef struct { *cacheRecord triggerLastUsed bool + descHandlers DescHandlers } type mutableRef struct { *cacheRecord triggerLastUsed bool + descHandlers DescHandlers } func (sr *immutableRef) Clone() ImmutableRef { sr.mu.Lock() - ref := sr.ref(false) + ref := sr.ref(false, sr.descHandlers) 
sr.mu.Unlock() return ref } +func (sr *immutableRef) Parent() ImmutableRef { + if p := sr.parentRef(true, sr.descHandlers); p != nil { // avoid returning typed nil pointer + return p + } + return nil +} + func (sr *immutableRef) Info() RefInfo { return RefInfo{ ChainID: digest.Digest(getChainID(sr.md)), @@ -308,25 +336,181 @@ func (sr *immutableRef) Info() RefInfo { } } -func (sr *immutableRef) Extract(ctx context.Context) error { - _, err := sr.sizeG.Do(ctx, sr.ID()+"-extract", func(ctx context.Context) (interface{}, error) { +func (sr *immutableRef) ociDesc() (ocispec.Descriptor, error) { + desc := ocispec.Descriptor{ + Digest: digest.Digest(getBlob(sr.md)), + Size: getBlobSize(sr.md), + MediaType: getMediaType(sr.md), + Annotations: make(map[string]string), + } + + diffID := getDiffID(sr.md) + if diffID != "" { + desc.Annotations["containerd.io/uncompressed"] = diffID + } + + createdAt := GetCreatedAt(sr.md) + if !createdAt.IsZero() { + createdAt, err := createdAt.MarshalText() + if err != nil { + return ocispec.Descriptor{}, err + } + desc.Annotations["buildkit/createdat"] = string(createdAt) + } + + return desc, nil +} + +// order is from parent->child, sr will be at end of slice +func (sr *immutableRef) parentRefChain() []*immutableRef { + var count int + for ref := sr; ref != nil; ref = ref.parent { + count++ + } + refs := make([]*immutableRef, count) + for i, ref := count-1, sr; ref != nil; i, ref = i-1, ref.parent { + refs[i] = ref + } + return refs +} + +func (sr *immutableRef) Mount(ctx context.Context, readonly bool, s session.Group) (snapshot.Mountable, error) { + if err := sr.Extract(ctx, s); err != nil { + return nil, err + } + + sr.mu.Lock() + defer sr.mu.Unlock() + return sr.mount(ctx, readonly) +} + +func (sr *immutableRef) Extract(ctx context.Context, s session.Group) (rerr error) { + if !getBlobOnly(sr.md) { + return + } + + ctx, done, err := leaseutil.WithLease(ctx, sr.cm.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return err + } + defer done(ctx) + + if GetLayerType(sr) == "windows" { + ctx = winlayers.UseWindowsLayerMode(ctx) + } + + if _, err := sr.prepareRemoteSnapshots(ctx, sr.descHandlers); err != nil { + return err + } + + return sr.extract(ctx, sr.descHandlers, s) +} + +func (sr *immutableRef) prepareRemoteSnapshots(ctx context.Context, dhs DescHandlers) (bool, error) { + ok, err := sr.sizeG.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ interface{}, rerr error) { snapshotID := getSnapshotID(sr.md) if _, err := sr.cm.Snapshotter.Stat(ctx, snapshotID); err == nil { - queueBlobOnly(sr.md, false) - return nil, sr.md.Commit() + return true, nil + } + desc, err := sr.ociDesc() + if err != nil { + return false, err + } + dh := dhs[desc.Digest] + if dh == nil { + return false, nil } parentID := "" if sr.parent != nil { - if err := sr.parent.Extract(ctx); err != nil { - return nil, err + if ok, err := sr.parent.prepareRemoteSnapshots(ctx, dhs); !ok { + return false, err } parentID = getSnapshotID(sr.parent.md) } - info := sr.Info() - key := fmt.Sprintf("extract-%s %s", identity.NewID(), info.ChainID) - err := sr.cm.Snapshotter.Prepare(ctx, key, parentID) + // Hint labels to the snapshotter + labels := dh.SnapshotLabels + if labels == nil { + labels = make(map[string]string) + } + labels["containerd.io/snapshot.ref"] = snapshotID + opt := snapshots.WithLabels(labels) + + // Try to preapre the remote snapshot + key := fmt.Sprintf("tmp-%s %s", identity.NewID(), sr.Info().ChainID) + if err = sr.cm.Snapshotter.Prepare(ctx, key, 
parentID, opt); err != nil { + if errdefs.IsAlreadyExists(err) { + // Check if the targeting snapshot ID has been prepared as a remote + // snapshot in the snapshotter. + if _, err := sr.cm.Snapshotter.Stat(ctx, snapshotID); err == nil { + // We can use this remote snapshot without unlazying. + // Try the next layer as well. + return true, nil + } + } + } + + // This layer cannot be prepared without unlazying. + return false, nil + }) + return ok.(bool), err +} + +func (sr *immutableRef) extract(ctx context.Context, dhs DescHandlers, s session.Group) error { + _, err := sr.sizeG.Do(ctx, sr.ID()+"-extract", func(ctx context.Context) (_ interface{}, rerr error) { + snapshotID := getSnapshotID(sr.md) + if _, err := sr.cm.Snapshotter.Stat(ctx, snapshotID); err == nil { + return nil, nil + } + + if sr.cm.Applier == nil { + return nil, errors.New("extract requires an applier") + } + + eg, egctx := errgroup.WithContext(ctx) + + parentID := "" + if sr.parent != nil { + eg.Go(func() error { + if err := sr.parent.extract(egctx, dhs, s); err != nil { + return err + } + parentID = getSnapshotID(sr.parent.md) + return nil + }) + } + + desc, err := sr.ociDesc() + if err != nil { + return nil, err + } + dh := dhs[desc.Digest] + + eg.Go(func() error { + // unlazies if needed, otherwise a no-op + return lazyRefProvider{ + ref: sr, + desc: desc, + dh: dh, + session: s, + }.Unlazy(egctx) + }) + + if err := eg.Wait(); err != nil { + return nil, err + } + + if dh != nil && dh.Progress != nil { + _, stopProgress := dh.Progress.Start(ctx) + defer stopProgress(rerr) + statusDone := dh.Progress.Status("extracting "+desc.Digest.String(), "extracting") + defer statusDone() + } + + key := fmt.Sprintf("extract-%s %s", identity.NewID(), sr.Info().ChainID) + + err = sr.cm.Snapshotter.Prepare(ctx, key, parentID) if err != nil { return nil, err } @@ -339,10 +523,7 @@ func (sr *immutableRef) Extract(ctx context.Context) error { if err != nil { return nil, err } - _, err = sr.cm.Applier.Apply(ctx, ocispec.Descriptor{ - Digest: info.Blob, - MediaType: info.MediaType, - }, mounts) + _, err = sr.cm.Applier.Apply(ctx, desc, mounts) if err != nil { unmount() return nil, err @@ -357,6 +538,7 @@ func (sr *immutableRef) Extract(ctx context.Context) error { } } queueBlobOnly(sr.md, false) + setSize(sr.md, sizeUnknown) if err := sr.md.Commit(); err != nil { return nil, err } @@ -365,65 +547,6 @@ func (sr *immutableRef) Extract(ctx context.Context) error { return err } -// SetBlob associates a blob with the cache record. 
-// A lease must be held for the blob when calling this function -// Caller should call Info() for knowing what current values are actually set -func (sr *immutableRef) SetBlob(ctx context.Context, desc ocispec.Descriptor) error { - diffID, err := diffIDFromDescriptor(desc) - if err != nil { - return err - } - if _, err := sr.cm.ContentStore.Info(ctx, desc.Digest); err != nil { - return err - } - - sr.mu.Lock() - defer sr.mu.Unlock() - - if getChainID(sr.md) != "" { - return nil - } - - if err := sr.finalize(ctx, true); err != nil { - return err - } - - p := sr.parent - var parentChainID digest.Digest - var parentBlobChainID digest.Digest - if p != nil { - pInfo := p.Info() - if pInfo.ChainID == "" || pInfo.BlobChainID == "" { - return errors.Errorf("failed to set blob for reference with non-addressable parent") - } - parentChainID = pInfo.ChainID - parentBlobChainID = pInfo.BlobChainID - } - - if err := sr.cm.LeaseManager.AddResource(ctx, leases.Lease{ID: sr.ID()}, leases.Resource{ - ID: desc.Digest.String(), - Type: "content", - }); err != nil { - return err - } - - queueDiffID(sr.md, diffID.String()) - queueBlob(sr.md, desc.Digest.String()) - chainID := diffID - blobChainID := imagespecidentity.ChainID([]digest.Digest{desc.Digest, diffID}) - if parentChainID != "" { - chainID = imagespecidentity.ChainID([]digest.Digest{parentChainID, chainID}) - blobChainID = imagespecidentity.ChainID([]digest.Digest{parentBlobChainID, blobChainID}) - } - queueChainID(sr.md, chainID.String()) - queueBlobChainID(sr.md, blobChainID.String()) - queueMediaType(sr.md, desc.MediaType) - if err := sr.md.Commit(); err != nil { - return err - } - return nil -} - func (sr *immutableRef) Release(ctx context.Context) error { sr.cm.mu.Lock() defer sr.cm.mu.Unlock() @@ -555,7 +678,7 @@ func (sr *mutableRef) commit(ctx context.Context) (*immutableRef, error) { rec := &cacheRecord{ mu: sr.mu, cm: sr.cm, - parent: sr.parentRef(false), + parent: sr.parentRef(false, sr.descHandlers), equalMutable: sr, refs: make(map[ref]struct{}), md: md, @@ -588,13 +711,16 @@ func (sr *mutableRef) commit(ctx context.Context) (*immutableRef, error) { return nil, err } - ref := rec.ref(true) + ref := rec.ref(true, sr.descHandlers) sr.equalImmutable = ref return ref, nil } -func (sr *mutableRef) updatesLastUsed() bool { - return sr.triggerLastUsed +func (sr *mutableRef) Mount(ctx context.Context, readonly bool, s session.Group) (snapshot.Mountable, error) { + sr.mu.Lock() + defer sr.mu.Unlock() + + return sr.mount(ctx, readonly) } func (sr *mutableRef) Commit(ctx context.Context) (ImmutableRef, error) { @@ -633,11 +759,10 @@ func (sr *mutableRef) release(ctx context.Context) error { } } return sr.remove(ctx, true) - } else { - if sr.updateLastUsed() { - updateLastUsed(sr.md) - sr.triggerLastUsed = false - } + } + if sr.updateLastUsed() { + updateLastUsed(sr.md) + sr.triggerLastUsed = false } return nil } diff --git a/vendor/github.com/moby/buildkit/cache/remote.go b/vendor/github.com/moby/buildkit/cache/remote.go new file mode 100644 index 0000000000000000000000000000000000000000..b74cb2713d9f5b901e7699d6a53a91fb3455ec31 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remote.go @@ -0,0 +1,204 @@ +package cache + +import ( + "context" + "fmt" + "net/url" + "strings" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/reference" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" + 
"github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/pull/pullprogress" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +type Unlazier interface { + Unlazy(ctx context.Context) error +} + +// GetRemote gets a *solver.Remote from content store for this ref (potentially pulling lazily). +// Note: Use WorkerRef.GetRemote instead as moby integration requires custom GetRemote implementation. +func (sr *immutableRef) GetRemote(ctx context.Context, createIfNeeded bool, compressionType compression.Type, s session.Group) (*solver.Remote, error) { + ctx, done, err := leaseutil.WithLease(ctx, sr.cm.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + defer done(ctx) + + err = sr.computeBlobChain(ctx, createIfNeeded, compressionType, s) + if err != nil { + return nil, err + } + + mprovider := &lazyMultiProvider{mprovider: contentutil.NewMultiProvider(nil)} + remote := &solver.Remote{ + Provider: mprovider, + } + + for _, ref := range sr.parentRefChain() { + desc, err := ref.ociDesc() + if err != nil { + return nil, err + } + + // NOTE: The media type might be missing for some migrated ones + // from before lease based storage. If so, we should detect + // the media type from blob data. + // + // Discussion: https://github.com/moby/buildkit/pull/1277#discussion_r352795429 + if desc.MediaType == "" { + desc.MediaType, err = compression.DetectLayerMediaType(ctx, sr.cm.ContentStore, desc.Digest, false) + if err != nil { + return nil, err + } + } + + // update distribution source annotation for lazy-refs (non-lazy refs + // will already have their dsl stored in the content store, which is + // used by the push handlers) + if isLazy, err := ref.isLazy(ctx); err != nil { + return nil, err + } else if isLazy { + imageRefs := getImageRefs(ref.md) + for _, imageRef := range imageRefs { + refspec, err := reference.Parse(imageRef) + if err != nil { + return nil, err + } + + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + return nil, err + } + + source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/") + if desc.Annotations == nil { + desc.Annotations = make(map[string]string) + } + dslKey := fmt.Sprintf("%s.%s", "containerd.io/distribution.source", source) + + var existingRepos []string + if existings, ok := desc.Annotations[dslKey]; ok { + existingRepos = strings.Split(existings, ",") + } + addNewRepo := true + for _, existing := range existingRepos { + if existing == repo { + addNewRepo = false + break + } + } + if addNewRepo { + existingRepos = append(existingRepos, repo) + } + desc.Annotations[dslKey] = strings.Join(existingRepos, ",") + } + } + + remote.Descriptors = append(remote.Descriptors, desc) + mprovider.Add(lazyRefProvider{ + ref: ref, + desc: desc, + dh: sr.descHandlers[desc.Digest], + session: s, + }) + } + return remote, nil +} + +type lazyMultiProvider struct { + mprovider *contentutil.MultiProvider + plist []lazyRefProvider +} + +func (mp *lazyMultiProvider) Add(p lazyRefProvider) { + mp.mprovider.Add(p.desc.Digest, p) + mp.plist = append(mp.plist, p) +} + +func (mp *lazyMultiProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + return mp.mprovider.ReaderAt(ctx, desc) +} + +func (mp *lazyMultiProvider) Unlazy(ctx context.Context) error { + eg, egctx := errgroup.WithContext(ctx) + for _, p := range mp.plist { + p := p + eg.Go(func() error { + return 
p.Unlazy(egctx) + }) + } + return eg.Wait() +} + +type lazyRefProvider struct { + ref *immutableRef + desc ocispec.Descriptor + dh *DescHandler + session session.Group +} + +func (p lazyRefProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + if desc.Digest != p.desc.Digest { + return nil, errdefs.ErrNotFound + } + if err := p.Unlazy(ctx); err != nil { + return nil, err + } + return p.ref.cm.ContentStore.ReaderAt(ctx, desc) +} + +func (p lazyRefProvider) Unlazy(ctx context.Context) error { + _, err := p.ref.cm.unlazyG.Do(ctx, string(p.desc.Digest), func(ctx context.Context) (_ interface{}, rerr error) { + if isLazy, err := p.ref.isLazy(ctx); err != nil { + return nil, err + } else if !isLazy { + return nil, nil + } + + if p.dh == nil { + // shouldn't happen, if you have a lazy immutable ref it already should be validated + // that descriptor handlers exist for it + return nil, errors.New("unexpected nil descriptor handler") + } + + if p.dh.Progress != nil { + var stopProgress func(error) + ctx, stopProgress = p.dh.Progress.Start(ctx) + defer stopProgress(rerr) + } + + // For now, just pull down the whole content and then return a ReaderAt from the local content + // store. If efficient partial reads are desired in the future, something more like a "tee" + // that caches remote partial reads to a local store may need to replace this. + err := contentutil.Copy(ctx, p.ref.cm.ContentStore, &pullprogress.ProviderWithProgress{ + Provider: p.dh.Provider(p.session), + Manager: p.ref.cm.ContentStore, + }, p.desc) + if err != nil { + return nil, err + } + + if imageRefs := getImageRefs(p.ref.md); len(imageRefs) > 0 { + // just use the first image ref, it's arbitrary + imageRef := imageRefs[0] + if GetDescription(p.ref.md) == "" { + queueDescription(p.ref.md, "pulled from "+imageRef) + err := p.ref.md.Commit() + if err != nil { + return nil, err + } + } + } + return nil, err + }) + return err +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/export.go b/vendor/github.com/moby/buildkit/cache/remotecache/export.go index d9d5bab7fdf9751dc354f0cf8a28e915102aca4d..542aa760860fe33a5fb49e0ddfd12c5d0a961c48 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/export.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/export.go @@ -12,6 +12,7 @@ import ( v1 "github.com/moby/buildkit/cache/remotecache/v1" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/progress" digest "github.com/opencontainers/go-digest" @@ -55,20 +56,17 @@ type contentCacheExporter struct { solver.CacheExporterTarget chains *v1.CacheChains ingester content.Ingester + oci bool } -func NewExporter(ingester content.Ingester) Exporter { +func NewExporter(ingester content.Ingester, oci bool) Exporter { cc := v1.NewCacheChains() - return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester} + return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci} } func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string, error) { - return export(ctx, ce.ingester, ce.chains) -} - -func export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) (map[string]string, error) { res := make(map[string]string) - config, descs, err := cc.Marshal() + config, descs, err := ce.chains.Marshal() if err != nil { return nil, err } @@ -86,6 +84,9 @@ func 
export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) var mfst manifestList mfst.SchemaVersion = 2 mfst.MediaType = images.MediaTypeDockerSchema2ManifestList + if ce.oci { + mfst.MediaType = ocispec.MediaTypeImageIndex + } for _, l := range config.Layers { dgstPair, ok := descs[l.Blob] @@ -93,13 +94,15 @@ func export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) return nil, errors.Errorf("missing blob %s", l.Blob) } layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob)) - if err := contentutil.Copy(ctx, ingester, dgstPair.Provider, dgstPair.Descriptor); err != nil { + if err := contentutil.Copy(ctx, ce.ingester, dgstPair.Provider, dgstPair.Descriptor); err != nil { return nil, layerDone(errors.Wrap(err, "error writing layer blob")) } layerDone(nil) mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor) } + mfst.Manifests = compression.ConvertAllLayerMediaTypes(ce.oci, mfst.Manifests...) + dt, err := json.Marshal(config) if err != nil { return nil, err @@ -111,7 +114,7 @@ func export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) MediaType: v1.CacheConfigMediaTypeV0, } configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst)) - if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { + if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { return nil, configDone(errors.Wrap(err, "error writing config blob")) } configDone(nil) @@ -130,7 +133,7 @@ func export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) MediaType: mfst.MediaType, } mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst)) - if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { + if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil { return nil, mfstDone(errors.Wrap(err, "error writing manifest blob")) } descJSON, err := json.Marshal(desc) diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go index dabb23564477222f17365e47d3fcd8039f14730a..2ee194afdfd0afe8517e074bac355d5f1335f557 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go @@ -2,6 +2,7 @@ package local import ( "context" + "strconv" "time" "github.com/containerd/containerd/content" @@ -17,6 +18,7 @@ const ( attrDigest = "digest" attrSrc = "src" attrDest = "dest" + attrOCIMediatypes = "oci-mediatypes" contentStoreIDPrefix = "local:" ) @@ -27,12 +29,20 @@ func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExpor if store == "" { return nil, errors.New("local cache exporter requires dest") } + ociMediatypes := true + if v, ok := attrs[attrOCIMediatypes]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", attrOCIMediatypes) + } + ociMediatypes = b + } csID := contentStoreIDPrefix + store cs, err := getContentStore(ctx, sm, g, csID) if err != nil { return nil, err } - return remotecache.NewExporter(cs), nil + return remotecache.NewExporter(cs, ociMediatypes), nil } } @@ -76,7 +86,7 @@ func getContentStore(ctx context.Context, sm *session.Manager, g session.Group, timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - caller, err := sm.Get(timeoutCtx, 
sessionID) + caller, err := sm.Get(timeoutCtx, sessionID, false) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go index e81fcb91bd0d896ec28c55c6b41866d2cc0e5cbf..281d9fa4a3f303e3f8e26833ada6db9eb6b5f578 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go @@ -2,6 +2,7 @@ package registry import ( "context" + "strconv" "github.com/containerd/containerd/content" "github.com/containerd/containerd/remotes/docker" @@ -28,7 +29,8 @@ func canonicalizeRef(rawRef string) (string, error) { } const ( - attrRef = "ref" + attrRef = "ref" + attrOCIMediatypes = "oci-mediatypes" ) func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) remotecache.ResolveCacheExporterFunc { @@ -37,12 +39,20 @@ func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) r if err != nil { return nil, err } - remote := resolver.New(hosts, resolver.NewSessionAuthenticator(sm, g)) + ociMediatypes := true + if v, ok := attrs[attrOCIMediatypes]; ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", attrOCIMediatypes) + } + ociMediatypes = b + } + remote := resolver.DefaultPool.GetResolver(hosts, ref, "push", sm, g) pusher, err := remote.Pusher(ctx, ref) if err != nil { return nil, err } - return remotecache.NewExporter(contentutil.FromPusher(pusher)), nil + return remotecache.NewExporter(contentutil.FromPusher(pusher), ociMediatypes), nil } } @@ -52,7 +62,7 @@ func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docke if err != nil { return nil, specs.Descriptor{}, err } - remote := resolver.New(hosts, resolver.NewSessionAuthenticator(sm, g)) + remote := resolver.DefaultPool.GetResolver(hosts, ref, "pull", sm, g) xref, desc, err := remote.Resolve(ctx, ref) if err != nil { return nil, specs.Descriptor{}, err diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go index 728b85a2ea83f55283d16071f608a42fddbd96cb..dee02f4b36999bc3d44539bbefc2551e04898645 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go @@ -5,6 +5,7 @@ import ( "time" "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" @@ -260,7 +261,7 @@ func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) return worker.NewWorkerRefResult(ref, cs.w), nil } -func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) { +func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult, _ session.Group) (*solver.Remote, error) { if r := cs.byResultID(res.ID); r != nil && r.result != nil { return r.result, nil } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go index a9b11c3e46171c4079ad2b3003a5c4d233613e38..a76cc6917bdea8e3258e34fff3c50cbfeea9d3d6 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go @@ -23,7 +23,7 @@ 
func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord { if strings.HasPrefix(dgst.String(), "random:") { return &nopRecord{} } - it := &item{c: c, dgst: dgst} + it := &item{c: c, dgst: dgst, backlinks: map[*item]struct{}{}} c.items = append(c.items, it) return it } @@ -44,6 +44,17 @@ func (c *CacheChains) normalize() error { byKey: map[digest.Digest]*item{}, } + validated := make([]*item, 0, len(c.items)) + for _, it := range c.items { + it.validate() + } + for _, it := range c.items { + if !it.invalid { + validated = append(validated, it) + } + } + c.items = validated + for _, it := range c.items { _, err := normalizeItem(it, st) if err != nil { @@ -99,7 +110,9 @@ type item struct { result *solver.Remote resultTime time.Time - links []map[link]struct{} + links []map[link]struct{} + backlinks map[*item]struct{} + invalid bool } type link struct { @@ -126,6 +139,30 @@ func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector stri } c.links[index][link{src: src, selector: selector}] = struct{}{} + src.backlinks[c] = struct{}{} +} + +func (c *item) validate() { + for _, m := range c.links { + if len(m) == 0 { + c.invalid = true + for bl := range c.backlinks { + changed := false + for _, m := range bl.links { + for l := range m { + if l.src == c { + delete(m, l) + changed = true + } + } + } + if changed { + bl.validate() + } + } + return + } + } } func (c *item) walkAllResults(fn func(i *item) error, visited map[*item]struct{}) error { diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go index dfc7fab36a174f1eb7e2f188de9c8692577cc7f9..fc494aa5a0e1ee8020f885a68c4d4d3d78ee07d3 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go @@ -4,10 +4,9 @@ import ( "fmt" "sort" - "github.com/containerd/containerd/content" + "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/solver" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -230,10 +229,6 @@ func marshalRemote(r *solver.Remote, state *marshalState) string { if len(r.Descriptors) == 0 { return "" } - type Remote struct { - Descriptors []ocispec.Descriptor - Provider content.Provider - } var parentID string if len(r.Descriptors) > 1 { r2 := &solver.Remote{ @@ -244,6 +239,10 @@ func marshalRemote(r *solver.Remote, state *marshalState) string { } desc := r.Descriptors[len(r.Descriptors)-1] + if desc.Digest == exptypes.EmptyGZLayer { + return parentID + } + state.descriptors[desc.Digest] = DescriptorProviderPair{ Descriptor: desc, Provider: r.Provider, diff --git a/vendor/github.com/moby/buildkit/cache/util/fsutil.go b/vendor/github.com/moby/buildkit/cache/util/fsutil.go index 41e5465f7fa3e67bc5e9519f738b7eee639e8dfc..b425a002a5422dbde309d06cb982a194337fee59 100644 --- a/vendor/github.com/moby/buildkit/cache/util/fsutil.go +++ b/vendor/github.com/moby/buildkit/cache/util/fsutil.go @@ -8,7 +8,6 @@ import ( "path/filepath" "github.com/containerd/continuity/fs" - "github.com/moby/buildkit/cache" "github.com/moby/buildkit/snapshot" "github.com/pkg/errors" "github.com/tonistiigi/fsutil" @@ -25,12 +24,7 @@ type FileRange struct { Length int } -func withMount(ctx context.Context, ref cache.ImmutableRef, cb func(string) error) error { - mount, err := ref.Mount(ctx, true) - if err != nil { - return err - } - +func withMount(ctx 
context.Context, mount snapshot.Mountable, cb func(string) error) error { lm := snapshot.LocalMounter(mount) root, err := lm.Mount() @@ -55,10 +49,10 @@ func withMount(ctx context.Context, ref cache.ImmutableRef, cb func(string) erro return nil } -func ReadFile(ctx context.Context, ref cache.ImmutableRef, req ReadRequest) ([]byte, error) { +func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([]byte, error) { var dt []byte - err := withMount(ctx, ref, func(root string) error { + err := withMount(ctx, mount, func(root string) error { fp, err := fs.RootPath(root, req.Filename) if err != nil { return errors.WithStack(err) @@ -90,7 +84,7 @@ type ReadDirRequest struct { IncludePattern string } -func ReadDir(ctx context.Context, ref cache.ImmutableRef, req ReadDirRequest) ([]*fstypes.Stat, error) { +func ReadDir(ctx context.Context, mount snapshot.Mountable, req ReadDirRequest) ([]*fstypes.Stat, error) { var ( rd []*fstypes.Stat wo fsutil.WalkOpt @@ -98,7 +92,7 @@ func ReadDir(ctx context.Context, ref cache.ImmutableRef, req ReadDirRequest) ([ if req.IncludePattern != "" { wo.IncludePatterns = append(wo.IncludePatterns, req.IncludePattern) } - err := withMount(ctx, ref, func(root string) error { + err := withMount(ctx, mount, func(root string) error { fp, err := fs.RootPath(root, req.Path) if err != nil { return errors.WithStack(err) @@ -123,9 +117,9 @@ func ReadDir(ctx context.Context, ref cache.ImmutableRef, req ReadDirRequest) ([ return rd, err } -func StatFile(ctx context.Context, ref cache.ImmutableRef, path string) (*fstypes.Stat, error) { +func StatFile(ctx context.Context, mount snapshot.Mountable, path string) (*fstypes.Stat, error) { var st *fstypes.Stat - err := withMount(ctx, ref, func(root string) error { + err := withMount(ctx, mount, func(root string) error { fp, err := fs.RootPath(root, path) if err != nil { return errors.WithStack(err) diff --git a/vendor/github.com/moby/buildkit/client/build.go b/vendor/github.com/moby/buildkit/client/build.go index 2518cd7c7a2ab83779b5d7ca147a54f18d63c5f4..4cb91d7aa98ab5a9fb4270e4ce2b2150f1653f4e 100644 --- a/vendor/github.com/moby/buildkit/client/build.go +++ b/vendor/github.com/moby/buildkit/client/build.go @@ -45,11 +45,14 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF } cb := func(ref string, s *session.Session) error { - g, err := grpcclient.New(ctx, feOpts, s.ID(), product, c.gatewayClientForBuild(ref), gworkers) + gwClient := c.gatewayClientForBuild(ref) + g, err := grpcclient.New(ctx, feOpts, s.ID(), product, gwClient, gworkers) if err != nil { return err } + gwClient.caps = g.BuildOpts().Caps + if err := g.Run(ctx, buildFunc); err != nil { return errors.Wrap(err, "failed to run Build function") } @@ -59,14 +62,18 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF return c.solve(ctx, nil, cb, opt, statusChan) } -func (c *Client) gatewayClientForBuild(buildid string) gatewayapi.LLBBridgeClient { +func (c *Client) gatewayClientForBuild(buildid string) *gatewayClientForBuild { g := gatewayapi.NewLLBBridgeClient(c.conn) - return &gatewayClientForBuild{g, buildid} + return &gatewayClientForBuild{ + gateway: g, + buildID: buildid, + } } type gatewayClientForBuild struct { gateway gatewayapi.LLBBridgeClient buildID string + caps apicaps.CapSet } func (g *gatewayClientForBuild) ResolveImageConfig(ctx context.Context, in *gatewayapi.ResolveImageConfigRequest, opts ...grpc.CallOption) (*gatewayapi.ResolveImageConfigResponse, error) { @@ -85,11 +92,17 
@@ func (g *gatewayClientForBuild) ReadFile(ctx context.Context, in *gatewayapi.Rea } func (g *gatewayClientForBuild) ReadDir(ctx context.Context, in *gatewayapi.ReadDirRequest, opts ...grpc.CallOption) (*gatewayapi.ReadDirResponse, error) { + if err := g.caps.Supports(gatewayapi.CapReadDir); err != nil { + return nil, err + } ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.ReadDir(ctx, in, opts...) } func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.StatFileRequest, opts ...grpc.CallOption) (*gatewayapi.StatFileResponse, error) { + if err := g.caps.Supports(gatewayapi.CapStatFile); err != nil { + return nil, err + } ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.StatFile(ctx, in, opts...) } @@ -105,6 +118,33 @@ func (g *gatewayClientForBuild) Return(ctx context.Context, in *gatewayapi.Retur } func (g *gatewayClientForBuild) Inputs(ctx context.Context, in *gatewayapi.InputsRequest, opts ...grpc.CallOption) (*gatewayapi.InputsResponse, error) { + if err := g.caps.Supports(gatewayapi.CapFrontendInputs); err != nil { + return nil, err + } ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.Inputs(ctx, in, opts...) } + +func (g *gatewayClientForBuild) NewContainer(ctx context.Context, in *gatewayapi.NewContainerRequest, opts ...grpc.CallOption) (*gatewayapi.NewContainerResponse, error) { + if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { + return nil, err + } + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.NewContainer(ctx, in, opts...) +} + +func (g *gatewayClientForBuild) ReleaseContainer(ctx context.Context, in *gatewayapi.ReleaseContainerRequest, opts ...grpc.CallOption) (*gatewayapi.ReleaseContainerResponse, error) { + if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { + return nil, err + } + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.ReleaseContainer(ctx, in, opts...) +} + +func (g *gatewayClientForBuild) ExecProcess(ctx context.Context, opts ...grpc.CallOption) (gatewayapi.LLBBridge_ExecProcessClient, error) { + if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { + return nil, err + } + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.ExecProcess(ctx, opts...) +} diff --git a/vendor/github.com/moby/buildkit/client/client.go b/vendor/github.com/moby/buildkit/client/client.go index 0546f4653c6b258bf2295eaf320d9819690ef6be..38429ff5bbb1bddeef96b0fc8791aafe272c58e4 100644 --- a/vendor/github.com/moby/buildkit/client/client.go +++ b/vendor/github.com/moby/buildkit/client/client.go @@ -6,8 +6,9 @@ import ( "crypto/x509" "io/ioutil" "net" - "time" + "net/url" + "github.com/containerd/containerd/defaults" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" controlapi "github.com/moby/buildkit/api/services/control" @@ -30,7 +31,10 @@ type ClientOpt interface{} // New returns a new buildkit client. Address can be empty for the system-default address. 
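
Each forwarded gateway RPC is now guarded by the capability set read from BuildOpts, so talking to an older daemon fails fast on the client instead of surfacing an opaque error from the wire. A rough sketch of the guard pattern; capSet and the capability ID below are invented stand-ins for apicaps.CapSet and the gateway capability constants:

```go
package main

import "fmt"

// capSet is an illustrative stand-in for apicaps.CapSet.
type capSet map[string]struct{}

func (c capSet) Supports(id string) error {
	if _, ok := c[id]; !ok {
		return fmt.Errorf("server does not support capability %s", id)
	}
	return nil
}

// readDir guards the call the way gatewayClientForBuild.ReadDir now does:
// consult the advertised capabilities before touching the connection.
func readDir(caps capSet, path string) error {
	if err := caps.Supports("gateway.readdir"); err != nil {
		return err // fail locally, no RPC is issued
	}
	fmt.Println("would issue ReadDir RPC for", path)
	return nil
}

func main() {
	newDaemon := capSet{"gateway.readdir": {}}
	fmt.Println(readDir(newDaemon, "/src")) // <nil>
	fmt.Println(readDir(capSet{}, "/src"))  // guard error, nothing sent
}
```
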
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) { - gopts := []grpc.DialOption{} + gopts := []grpc.DialOption{ + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)), + } needDialer := true needWithInsecure := true @@ -54,7 +58,7 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)) } if wd, ok := o.(*withDialer); ok { - gopts = append(gopts, grpc.WithDialer(wd.dialer)) + gopts = append(gopts, grpc.WithContextDialer(wd.dialer)) needDialer = false } } @@ -63,9 +67,7 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error if err != nil { return nil, err } - // TODO(AkihiroSuda): use WithContextDialer (requires grpc 1.19) - // https://github.com/grpc/grpc-go/commit/40cb5618f475e7b9d61aa7920ae4b04ef9bbaf89 - gopts = append(gopts, grpc.WithDialer(dialFn)) + gopts = append(gopts, grpc.WithContextDialer(dialFn)) } if needWithInsecure { gopts = append(gopts, grpc.WithInsecure()) @@ -74,6 +76,15 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error address = appdefaults.Address } + // grpc-go uses a slightly different naming scheme: https://github.com/grpc/grpc/blob/master/doc/naming.md + // This will end up setting rfc non-complient :authority header to address string (e.g. tcp://127.0.0.1:1234). + // So, here sets right authority header via WithAuthority DialOption. + addressURL, err := url.Parse(address) + if err != nil { + return nil, err + } + gopts = append(gopts, grpc.WithAuthority(addressURL.Host)) + unary = append(unary, grpcerrors.UnaryClientInterceptor) stream = append(stream, grpcerrors.StreamClientInterceptor) @@ -118,10 +129,10 @@ func WithFailFast() ClientOpt { } type withDialer struct { - dialer func(string, time.Duration) (net.Conn, error) + dialer func(context.Context, string) (net.Conn, error) } -func WithDialer(df func(string, time.Duration) (net.Conn, error)) ClientOpt { +func WithContextDialer(df func(context.Context, string) (net.Conn, error)) ClientOpt { return &withDialer{dialer: df} } @@ -179,17 +190,13 @@ type withTracer struct { tracer opentracing.Tracer } -func resolveDialer(address string) (func(string, time.Duration) (net.Conn, error), error) { +func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) { ch, err := connhelper.GetConnectionHelper(address) if err != nil { return nil, err } if ch != nil { - f := func(a string, _ time.Duration) (net.Conn, error) { - ctx := context.Background() - return ch.ContextDialer(ctx, a) - } - return f, nil + return ch.ContextDialer, nil } // basic dialer return dialer, nil diff --git a/vendor/github.com/moby/buildkit/client/client_unix.go b/vendor/github.com/moby/buildkit/client/client_unix.go index 93afb956f1b0cec04ec34d29249f073d88b3917c..888a8173ad06a93ea2c74613a443e770e62359d4 100644 --- a/vendor/github.com/moby/buildkit/client/client_unix.go +++ b/vendor/github.com/moby/buildkit/client/client_unix.go @@ -3,17 +3,18 @@ package client import ( + "context" "net" "strings" - "time" "github.com/pkg/errors" ) -func dialer(address string, timeout time.Duration) (net.Conn, error) { +func dialer(ctx context.Context, address string) (net.Conn, error) { addrParts := strings.SplitN(address, "://", 2) if len(addrParts) != 2 { return nil, errors.Errorf("invalid address %s", address) } - return 
net.DialTimeout(addrParts[0], addrParts[1], timeout) + var d net.Dialer + return d.DialContext(ctx, addrParts[0], addrParts[1]) } diff --git a/vendor/github.com/moby/buildkit/client/client_windows.go b/vendor/github.com/moby/buildkit/client/client_windows.go index d0d8a1b4085f70331cc6214990229d2973366c01..a9eb87f2483fa29a4e8fbd30d0fa47cc7abcfaae 100644 --- a/vendor/github.com/moby/buildkit/client/client_windows.go +++ b/vendor/github.com/moby/buildkit/client/client_windows.go @@ -1,15 +1,15 @@ package client import ( + "context" "net" "strings" - "time" winio "github.com/Microsoft/go-winio" "github.com/pkg/errors" ) -func dialer(address string, timeout time.Duration) (net.Conn, error) { +func dialer(ctx context.Context, address string) (net.Conn, error) { addrParts := strings.SplitN(address, "://", 2) if len(addrParts) != 2 { return nil, errors.Errorf("invalid address %s", address) @@ -17,8 +17,9 @@ func dialer(address string, timeout time.Duration) (net.Conn, error) { switch addrParts[0] { case "npipe": address = strings.Replace(addrParts[1], "/", "\\", -1) - return winio.DialPipe(address, &timeout) + return winio.DialPipeContext(ctx, address) default: - return net.DialTimeout(addrParts[0], addrParts[1], timeout) + var d net.Dialer + return d.DialContext(ctx, addrParts[0], addrParts[1]) } } diff --git a/vendor/github.com/moby/buildkit/client/llb/definition.go b/vendor/github.com/moby/buildkit/client/llb/definition.go index fe9f7c17f87f6fc803c69d34a849e250d30a48a7..99af7c6879656064bff5f49fb428e865a519edff 100644 --- a/vendor/github.com/moby/buildkit/client/llb/definition.go +++ b/vendor/github.com/moby/buildkit/client/llb/definition.go @@ -16,14 +16,15 @@ import ( // LLB state can be reconstructed from the definition. type DefinitionOp struct { MarshalCache - mu sync.Mutex - ops map[digest.Digest]*pb.Op - defs map[digest.Digest][]byte - metas map[digest.Digest]pb.OpMetadata - sources map[digest.Digest][]*SourceLocation - platforms map[digest.Digest]*specs.Platform - dgst digest.Digest - index pb.OutputIndex + mu sync.Mutex + ops map[digest.Digest]*pb.Op + defs map[digest.Digest][]byte + metas map[digest.Digest]pb.OpMetadata + sources map[digest.Digest][]*SourceLocation + platforms map[digest.Digest]*specs.Platform + dgst digest.Digest + index pb.OutputIndex + inputCache map[digest.Digest][]*DefinitionOp } // NewDefinitionOp returns a new operation from a marshalled definition. 
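
The client dialing changes replace the deprecated grpc.WithDialer with grpc.WithContextDialer, raise the default call-size limits, and set :authority explicitly because the raw target string (for example "tcp://127.0.0.1:1234") is not a gRPC-compliant name. A condensed sketch of the same dial setup; the 16 MiB limits stand in for the containerd defaults:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"net/url"
	"strings"

	"google.golang.org/grpc"
)

// dial condenses the updated client setup: a context-aware dialer that
// splits "proto://addr", an explicit :authority taken from the parsed URL,
// and larger default call sizes.
func dial(ctx context.Context, address string) (*grpc.ClientConn, error) {
	dialer := func(ctx context.Context, addr string) (net.Conn, error) {
		parts := strings.SplitN(addr, "://", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid address %s", addr)
		}
		var d net.Dialer
		return d.DialContext(ctx, parts[0], parts[1])
	}
	u, err := url.Parse(address)
	if err != nil {
		return nil, err
	}
	return grpc.DialContext(ctx, address,
		grpc.WithInsecure(),
		grpc.WithContextDialer(dialer),
		grpc.WithAuthority(u.Host), // not the raw "tcp://..." string
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(16<<20),
			grpc.MaxCallSendMsgSize(16<<20),
		),
	)
}

func main() {
	conn, err := dial(context.Background(), "tcp://127.0.0.1:1234")
	if err != nil {
		fmt.Println("dial error:", err)
		return
	}
	defer conn.Close()
	fmt.Println("state:", conn.GetState())
}
```
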
@@ -89,13 +90,14 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) { } return &DefinitionOp{ - ops: ops, - defs: defs, - metas: def.Metadata, - sources: srcs, - platforms: platforms, - dgst: dgst, - index: index, + ops: ops, + defs: defs, + metas: def.Metadata, + sources: srcs, + platforms: platforms, + dgst: dgst, + index: index, + inputCache: make(map[digest.Digest][]*DefinitionOp), }, nil } @@ -188,14 +190,34 @@ func (d *DefinitionOp) Inputs() []Output { d.mu.Unlock() for _, input := range op.Inputs { - vtx := &DefinitionOp{ - ops: d.ops, - defs: d.defs, - metas: d.metas, - platforms: d.platforms, - dgst: input.Digest, - index: input.Index, + var vtx *DefinitionOp + d.mu.Lock() + if existingIndexes, ok := d.inputCache[input.Digest]; ok { + if int(input.Index) < len(existingIndexes) && existingIndexes[input.Index] != nil { + vtx = existingIndexes[input.Index] + } } + if vtx == nil { + vtx = &DefinitionOp{ + ops: d.ops, + defs: d.defs, + metas: d.metas, + platforms: d.platforms, + dgst: input.Digest, + index: input.Index, + inputCache: d.inputCache, + } + existingIndexes := d.inputCache[input.Digest] + indexDiff := int(input.Index) - len(existingIndexes) + if indexDiff >= 0 { + // make room in the slice for the new index being set + existingIndexes = append(existingIndexes, make([]*DefinitionOp, indexDiff+1)...) + } + existingIndexes[input.Index] = vtx + d.inputCache[input.Digest] = existingIndexes + } + d.mu.Unlock() + inputs = append(inputs, &output{vertex: vtx, platform: platform, getIndex: func() (pb.OutputIndex, error) { return pb.OutputIndex(vtx.index), nil }}) diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go index 9d27bbcd1c92d5dfa8e8905f715df679e15650f1..decc0d7407f7cc43f69cf0496e0347eac3101ebc 100644 --- a/vendor/github.com/moby/buildkit/client/llb/exec.go +++ b/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -2,7 +2,7 @@ package llb import ( "context" - _ "crypto/sha256" + _ "crypto/sha256" // for opencontainers/go-digest "fmt" "net" "sort" @@ -153,7 +153,13 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } if c.Caps != nil { if err := c.Caps.Supports(pb.CapExecMetaSetsDefaultPath); err != nil { - env = env.SetDefault("PATH", system.DefaultPathEnv) + os := "linux" + if c.Platform != nil { + os = c.Platform.OS + } else if e.constraints.Platform != nil { + os = e.constraints.Platform.OS + } + env = env.SetDefault("PATH", system.DefaultPathEnv(os)) } else { addCap(&e.constraints, pb.CapExecMetaSetsDefaultPath) } @@ -174,11 +180,17 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] return "", nil, nil, nil, err } + hostname, err := getHostname(e.base)(ctx) + if err != nil { + return "", nil, nil, nil, err + } + meta := &pb.Meta{ - Args: args, - Env: env.ToArray(), - Cwd: cwd, - User: user, + Args: args, + Env: env.ToArray(), + Cwd: cwd, + User: user, + Hostname: hostname, } extraHosts, err := getExtraHosts(e.base)(ctx) if err != nil { @@ -217,9 +229,9 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] if p := e.proxyEnv; p != nil { peo.Meta.ProxyEnv = &pb.ProxyEnv{ - HttpProxy: p.HttpProxy, - HttpsProxy: p.HttpsProxy, - FtpProxy: p.FtpProxy, + HttpProxy: p.HTTPProxy, + HttpsProxy: p.HTTPSProxy, + FtpProxy: p.FTPProxy, NoProxy: p.NoProxy, } addCap(&e.constraints, pb.CapExecMetaProxy) @@ -629,9 +641,9 @@ type MountInfo struct { } type ProxyEnv struct { - HttpProxy string - HttpsProxy string - 
FtpProxy string + HTTPProxy string + HTTPSProxy string + FTPProxy string NoProxy string } diff --git a/vendor/github.com/moby/buildkit/client/llb/fileop.go b/vendor/github.com/moby/buildkit/client/llb/fileop.go index db5ed00dd20b02afcb1d03024ef168a96b061658..fdcf9929a8a57a2fd0b81ff071a2fec0ff1c4d85 100644 --- a/vendor/github.com/moby/buildkit/client/llb/fileop.go +++ b/vendor/github.com/moby/buildkit/client/llb/fileop.go @@ -2,7 +2,7 @@ package llb import ( "context" - _ "crypto/sha256" + _ "crypto/sha256" // for opencontainers/go-digest "os" "path" "strconv" @@ -252,13 +252,13 @@ func (co ChownOpt) SetCopyOption(mi *CopyInfo) { mi.ChownOpt = &co } -func (cp *ChownOpt) marshal(base pb.InputIndex) *pb.ChownOpt { - if cp == nil { +func (co *ChownOpt) marshal(base pb.InputIndex) *pb.ChownOpt { + if co == nil { return nil } return &pb.ChownOpt{ - User: cp.User.marshal(base), - Group: cp.Group.marshal(base), + User: co.User.marshal(base), + Group: co.Group.marshal(base), } } @@ -476,17 +476,17 @@ func (a *fileActionCopy) toProtoAction(ctx context.Context, parent string, base }, nil } -func (c *fileActionCopy) sourcePath(ctx context.Context) (string, error) { - p := path.Clean(c.src) +func (a *fileActionCopy) sourcePath(ctx context.Context) (string, error) { + p := path.Clean(a.src) if !path.IsAbs(p) { - if c.state != nil { - dir, err := c.state.GetDir(ctx) + if a.state != nil { + dir, err := a.state.GetDir(ctx) if err != nil { return "", err } p = path.Join("/", dir, p) - } else if c.fas != nil { - dir, err := c.fas.state.GetDir(ctx) + } else if a.fas != nil { + dir, err := a.fas.state.GetDir(ctx) if err != nil { return "", err } diff --git a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go index 0dbd4737aad40ff738d7111242ebeef7d7303198..97a0cba731cd7ac122b489fc90f7748fe056a508 100644 --- a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go +++ b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go @@ -8,10 +8,10 @@ import ( "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" - "github.com/docker/docker/pkg/locker" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/imageutil" + "github.com/moby/locker" digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/vendor/github.com/moby/buildkit/client/llb/meta.go b/vendor/github.com/moby/buildkit/client/llb/meta.go index ab0f59328eb74423ef25ee903a5e9a5b933ff2ef..80cc18dab1c72e8c83bf553892c610cc4f60b56e 100644 --- a/vendor/github.com/moby/buildkit/client/llb/meta.go +++ b/vendor/github.com/moby/buildkit/client/llb/meta.go @@ -19,6 +19,7 @@ var ( keyDir = contextKeyT("llb.exec.dir") keyEnv = contextKeyT("llb.exec.env") keyUser = contextKeyT("llb.exec.user") + keyHostname = contextKeyT("llb.exec.hostname") keyExtraHost = contextKeyT("llb.exec.extrahost") keyPlatform = contextKeyT("llb.platform") keyNetwork = contextKeyT("llb.network") @@ -143,6 +144,25 @@ func getUser(s State) func(context.Context) (string, error) { } } +func Hostname(str string) StateOption { + return func(s State) State { + return s.WithValue(keyHostname, str) + } +} + +func getHostname(s State) func(context.Context) (string, error) { + return func(ctx context.Context) (string, error) { + v, err := s.getValue(keyHostname)(ctx) + if err != 
nil { + return "", err + } + if v != nil { + return v.(string), nil + } + return "", nil + } +} + func args(args ...string) StateOption { return func(s State) State { return s.WithValue(keyArgs, args) @@ -155,7 +175,7 @@ func shlexf(str string, replace bool, v ...interface{}) StateOption { } return func(s State) State { arg, err := shlex.Split(str) - if err != nil { + if err != nil { //nolint // TODO: handle error } return args(arg...)(s) diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go index 51fdcf9235983134d5bd710d0dad76b30ce29355..7cb9b4ed6a980338c498999041257aff6aba8f82 100644 --- a/vendor/github.com/moby/buildkit/client/llb/source.go +++ b/vendor/github.com/moby/buildkit/client/llb/source.go @@ -2,7 +2,7 @@ package llb import ( "context" - _ "crypto/sha256" + _ "crypto/sha256" // for opencontainers/go-digest "encoding/json" "os" "strconv" @@ -233,11 +233,15 @@ func Git(remote, ref string, opts ...GitOption) State { } if gi.AuthTokenSecret != "" { attrs[pb.AttrAuthTokenSecret] = gi.AuthTokenSecret - addCap(&gi.Constraints, pb.CapSourceGitHttpAuth) + if gi.addAuthCap { + addCap(&gi.Constraints, pb.CapSourceGitHTTPAuth) + } } if gi.AuthHeaderSecret != "" { attrs[pb.AttrAuthHeaderSecret] = gi.AuthHeaderSecret - addCap(&gi.Constraints, pb.CapSourceGitHttpAuth) + if gi.addAuthCap { + addCap(&gi.Constraints, pb.CapSourceGitHTTPAuth) + } } addCap(&gi.Constraints, pb.CapSourceGit) @@ -260,6 +264,7 @@ type GitInfo struct { KeepGitDir bool AuthTokenSecret string AuthHeaderSecret string + addAuthCap bool } func KeepGitDir() GitOption { @@ -271,12 +276,14 @@ func KeepGitDir() GitOption { func AuthTokenSecret(v string) GitOption { return gitOptionFunc(func(gi *GitInfo) { gi.AuthTokenSecret = v + gi.addAuthCap = true }) } func AuthHeaderSecret(v string) GitOption { return gitOptionFunc(func(gi *GitInfo) { gi.AuthHeaderSecret = v + gi.addAuthCap = true }) } diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go index dac9b8713e3c12e4b8248844491f02fc455f30b7..eca7164daadf8c700596165b4f94377c304edfc6 100644 --- a/vendor/github.com/moby/buildkit/client/llb/state.go +++ b/vendor/github.com/moby/buildkit/client/llb/state.go @@ -320,6 +320,14 @@ func (s State) User(v string) State { return User(v)(s) } +func (s State) Hostname(v string) State { + return Hostname(v)(s) +} + +func (s State) GetHostname(ctx context.Context) (string, error) { + return getHostname(s)(ctx) +} + func (s State) Platform(p specs.Platform) State { return platform(p)(s) } diff --git a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go index 13f1d507642447f0bcc1583653395b81b98ff591..7bc583443aaef224ff62ef9e90637eb6fa8728ce 100644 --- a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go +++ b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go @@ -6,7 +6,7 @@ import ( "os" "github.com/gofrs/flock" - "github.com/opencontainers/image-spec/specs-go/v1" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) diff --git a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go index fc2da18fcc2febfa512eabc2b02bab573a2f8e3c..fe3e7ffa0dcf3a9dc61144eddd536c119e587dfb 100644 --- a/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go +++ b/vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go @@ -1,5 +1,7 @@ 
package config +import "github.com/BurntSushi/toml" + // Config provides containerd configuration data for the server type Config struct { Debug bool `toml:"debug"` @@ -78,7 +80,13 @@ type OCIConfig struct { // incomplete and the intention is to make it default without config. UserRemapUnsupported string `toml:"userRemapUnsupported"` // For use in storing the OCI worker binary name that will replace buildkit-runc - Binary string `toml:"binary"` + Binary string `toml:"binary"` + ProxySnapshotterPath string `toml:"proxySnapshotterPath"` + + // StargzSnapshotterConfig is configuration for stargz snapshotter. + // Decoding this is delayed in order to remove the dependency from this + // config pkg to stargz snapshotter's config pkg. + StargzSnapshotterConfig toml.Primitive `toml:"stargzSnapshotter"` } type ContainerdConfig struct { @@ -89,6 +97,7 @@ type ContainerdConfig struct { Namespace string `toml:"namespace"` GCConfig NetworkConfig + Snapshotter string `toml:"snapshotter"` } type GCPolicy struct { diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go index 0ee110ed39ffcb6df490b3e2ca867d02752f7a57..048e6966834db59d28b3f7460dd28826913874dd 100644 --- a/vendor/github.com/moby/buildkit/control/control.go +++ b/vendor/github.com/moby/buildkit/control/control.go @@ -63,7 +63,7 @@ func NewController(opt Opt) (*Controller, error) { cache: cache, gatewayForwarder: gatewayForwarder, } - c.throttledGC = throttle.ThrottleAfter(time.Minute, c.gc) + c.throttledGC = throttle.After(time.Minute, c.gc) defer func() { time.AfterFunc(time.Second, c.throttledGC) @@ -305,40 +305,56 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con if !ok { return nil } - sr := controlapi.StatusResponse{} - for _, v := range ss.Vertexes { - sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, - }) - } - for _, v := range ss.Statuses { - sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{ - ID: v.ID, - Vertex: v.Vertex, - Name: v.Name, - Current: v.Current, - Total: v.Total, - Timestamp: v.Timestamp, - Started: v.Started, - Completed: v.Completed, - }) - } - for _, v := range ss.Logs { - sr.Logs = append(sr.Logs, &controlapi.VertexLog{ - Vertex: v.Vertex, - Stream: int64(v.Stream), - Msg: v.Data, - Timestamp: v.Timestamp, - }) - } - if err := stream.SendMsg(&sr); err != nil { - return err + logSize := 0 + retry := false + for { + sr := controlapi.StatusResponse{} + for _, v := range ss.Vertexes { + sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + }) + } + for _, v := range ss.Statuses { + sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{ + ID: v.ID, + Vertex: v.Vertex, + Name: v.Name, + Current: v.Current, + Total: v.Total, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for i, v := range ss.Logs { + sr.Logs = append(sr.Logs, &controlapi.VertexLog{ + Vertex: v.Vertex, + Stream: int64(v.Stream), + Msg: v.Data, + Timestamp: v.Timestamp, + }) + logSize += len(v.Data) + // avoid logs growing big and split apart if they do + if logSize > 1024*1024 { + ss.Vertexes = nil + ss.Statuses = nil + ss.Logs = ss.Logs[i+1:] + retry = true + break + } + } + if err := stream.SendMsg(&sr); err != nil { + 
return err + } + if !retry { + break + } } } }) diff --git a/vendor/github.com/moby/buildkit/control/gateway/gateway.go b/vendor/github.com/moby/buildkit/control/gateway/gateway.go index 2d4e8b38c771b4f28ff3f4a120bbc051416e898e..9b91a08e0f8f10918afada69ea9f9411934e6315 100644 --- a/vendor/github.com/moby/buildkit/control/gateway/gateway.go +++ b/vendor/github.com/moby/buildkit/control/gateway/gateway.go @@ -152,3 +152,27 @@ func (gwf *GatewayForwarder) StatFile(ctx context.Context, req *gwapi.StatFileRe } return fwd.StatFile(ctx, req) } + +func (gwf *GatewayForwarder) NewContainer(ctx context.Context, req *gwapi.NewContainerRequest) (*gwapi.NewContainerResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding NewContainer") + } + return fwd.NewContainer(ctx, req) +} + +func (gwf *GatewayForwarder) ReleaseContainer(ctx context.Context, req *gwapi.ReleaseContainerRequest) (*gwapi.ReleaseContainerResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding ReleaseContainer") + } + return fwd.ReleaseContainer(ctx, req) +} + +func (gwf *GatewayForwarder) ExecProcess(srv gwapi.LLBBridge_ExecProcessServer) error { + fwd, err := gwf.lookupForwarder(srv.Context()) + if err != nil { + return errors.Wrap(err, "forwarding ExecProcess") + } + return fwd.ExecProcess(srv) +} diff --git a/vendor/github.com/moby/buildkit/executor/executor.go b/vendor/github.com/moby/buildkit/executor/executor.go index 5ab425253a1c26d2f8dfdd053e3cf216796c3ebc..8fbe4a9234862103d3c398aa87d44cc482499b27 100644 --- a/vendor/github.com/moby/buildkit/executor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/executor.go @@ -5,7 +5,7 @@ import ( "io" "net" - "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/pb" ) @@ -14,6 +14,7 @@ type Meta struct { Env []string User string Cwd string + Hostname string Tty bool ReadonlyRootFS bool ExtraHosts []HostIP @@ -21,24 +22,34 @@ type Meta struct { SecurityMode pb.SecurityMode } +type Mountable interface { + Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) +} + type Mount struct { - Src cache.Mountable + Src Mountable Selector string Dest string Readonly bool } +type WinSize struct { + Rows uint32 + Cols uint32 +} + type ProcessInfo struct { Meta Meta Stdin io.ReadCloser Stdout, Stderr io.WriteCloser + Resize <-chan WinSize } type Executor interface { // Run will start a container for the given process with rootfs, mounts. // `id` is an optional name for the container so it can be referenced later via Exec. // `started` is an optional channel that will be closed when the container setup completes and has started running. - Run(ctx context.Context, id string, rootfs cache.Mountable, mounts []Mount, process ProcessInfo, started chan<- struct{}) error + Run(ctx context.Context, id string, rootfs Mount, mounts []Mount, process ProcessInfo, started chan<- struct{}) error // Exec will start a process in container matching `id`. An error will be returned // if the container failed to start (via Run) or has exited before Exec is called. 
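
Back in the daemon config, OCIConfig keeps the stargz snapshotter section as an undecoded toml.Primitive so this package does not import the snapshotter's own config types; whoever owns that schema decodes it later. A short sketch of the two-phase decode with BurntSushi/toml, using made-up stargz field names:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// ociConfig mimics the relevant part of OCIConfig: the nested table is kept
// undecoded until a consumer that knows its schema asks for it.
type ociConfig struct {
	Binary                  string         `toml:"binary"`
	StargzSnapshotterConfig toml.Primitive `toml:"stargzSnapshotter"`
}

// stargzConfig is an illustrative consumer-side schema, not the real one.
type stargzConfig struct {
	NoPrefetch bool `toml:"noprefetch"`
}

func main() {
	data := `
binary = "buildkit-runc"

[stargzSnapshotter]
noprefetch = true
`
	var cfg ociConfig
	md, err := toml.Decode(data, &cfg)
	if err != nil {
		panic(err)
	}

	// Second phase: decode the deferred table once its schema is known.
	var sgz stargzConfig
	if err := md.PrimitiveDecode(cfg.StargzSnapshotterConfig, &sgz); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Binary, sgz.NoPrefetch) // buildkit-runc true
}
```
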
Exec(ctx context.Context, id string, process ProcessInfo) error diff --git a/vendor/github.com/moby/buildkit/executor/oci/hosts.go b/vendor/github.com/moby/buildkit/executor/oci/hosts.go index 552b5851e0461cb774b22c9502f6506e419dd3d2..d0505c28ccd92a85ee2f353c687250ada12a0df1 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/hosts.go +++ b/vendor/github.com/moby/buildkit/executor/oci/hosts.go @@ -14,28 +14,26 @@ import ( "github.com/pkg/errors" ) -const hostsContent = ` -127.0.0.1 localhost buildkitsandbox -::1 localhost ip6-localhost ip6-loopback -` +const defaultHostname = "buildkitsandbox" -func GetHostsFile(ctx context.Context, stateDir string, extraHosts []executor.HostIP, idmap *idtools.IdentityMapping) (string, func(), error) { - if len(extraHosts) == 0 { - _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { - _, _, err := makeHostsFile(stateDir, nil, idmap) - return nil, err - }) - if err != nil { - return "", nil, err - } - return filepath.Join(stateDir, "hosts"), func() {}, nil +func GetHostsFile(ctx context.Context, stateDir string, extraHosts []executor.HostIP, idmap *idtools.IdentityMapping, hostname string) (string, func(), error) { + if len(extraHosts) != 0 || hostname != defaultHostname { + return makeHostsFile(stateDir, extraHosts, idmap, hostname) + } + + _, err := g.Do(ctx, stateDir, func(ctx context.Context) (interface{}, error) { + _, _, err := makeHostsFile(stateDir, nil, idmap, hostname) + return nil, err + }) + if err != nil { + return "", nil, err } - return makeHostsFile(stateDir, extraHosts, idmap) + return filepath.Join(stateDir, "hosts"), func() {}, nil } -func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools.IdentityMapping) (string, func(), error) { +func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools.IdentityMapping, hostname string) (string, func(), error) { p := filepath.Join(stateDir, "hosts") - if len(extraHosts) != 0 { + if len(extraHosts) != 0 || hostname != defaultHostname { p += "." 
+ identity.NewID() } _, err := os.Stat(p) @@ -47,8 +45,7 @@ func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools } b := &bytes.Buffer{} - - if _, err := b.Write([]byte(hostsContent)); err != nil { + if _, err := b.Write([]byte(initHostsFile(hostname))); err != nil { return "", nil, err } @@ -77,3 +74,14 @@ func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools os.RemoveAll(p) }, nil } + +func initHostsFile(hostname string) string { + var hosts string + if hostname != "" { + hosts = fmt.Sprintf("127.0.0.1 localhost %s", hostname) + } else { + hosts = fmt.Sprintf("127.0.0.1 localhost %s", defaultHostname) + } + hosts = fmt.Sprintf("%s\n::1 localhost ip6-localhost ip6-loopback\n", hosts) + return hosts +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/mounts.go b/vendor/github.com/moby/buildkit/executor/oci/mounts.go index 62dbd388d5af6f0630ee9508e182e2663518966c..62360f4663347ecdb86071118a83a66141500181 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/mounts.go +++ b/vendor/github.com/moby/buildkit/executor/oci/mounts.go @@ -5,82 +5,57 @@ import ( "path/filepath" "strings" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/oci" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) -// MountOpts sets oci spec specific info for mount points -type MountOpts func([]specs.Mount) ([]specs.Mount, error) - -//GetMounts returns default required for buildkit -// https://github.com/moby/buildkit/issues/429 -func GetMounts(ctx context.Context, mountOpts ...MountOpts) ([]specs.Mount, error) { - mounts := []specs.Mount{ - { - Destination: "/proc", - Type: "proc", - Source: "proc", - }, - { - Destination: "/dev", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, - }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, - }, - { - Destination: "/dev/shm", - Type: "tmpfs", - Source: "shm", - Options: []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"}, - }, - { - Destination: "/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, - } - var err error - for _, o := range mountOpts { - mounts, err = o(mounts) - if err != nil { - return nil, err +func withRemovedMount(destination string) oci.SpecOpts { + return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + newMounts := []specs.Mount{} + for _, o := range s.Mounts { + if o.Destination != destination { + newMounts = append(newMounts, o) + } } + s.Mounts = newMounts + + return nil } - return mounts, nil } -func withROBind(src, dest string) func(m []specs.Mount) ([]specs.Mount, error) { - return func(m []specs.Mount) ([]specs.Mount, error) { - m = append(m, specs.Mount{ +func withROBind(src, dest string) oci.SpecOpts { + return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + s.Mounts = append(s.Mounts, specs.Mount{ Destination: dest, Type: "bind", Source: src, Options: []string{"nosuid", "noexec", "nodev", "rbind", "ro"}, }) - return m, nil + return nil + } +} + +func withCGroup() oci.SpecOpts { + return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + 
s.Mounts = append(s.Mounts, specs.Mount{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }) + return nil } + } func hasPrefix(p, prefixDir string) bool { prefixDir = filepath.Clean(prefixDir) - if prefixDir == "/" { + if filepath.Base(prefixDir) == string(filepath.Separator) { return true } p = filepath.Clean(p) - return p == prefixDir || strings.HasPrefix(p, prefixDir+"/") + return p == prefixDir || strings.HasPrefix(p, prefixDir+string(filepath.Separator)) } func removeMountsWithPrefix(mounts []specs.Mount, prefixDir string) []specs.Mount { @@ -93,25 +68,35 @@ func removeMountsWithPrefix(mounts []specs.Mount, prefixDir string) []specs.Moun return ret } -func withProcessMode(processMode ProcessMode) func([]specs.Mount) ([]specs.Mount, error) { - return func(m []specs.Mount) ([]specs.Mount, error) { - switch processMode { - case ProcessSandbox: - // keep the default - case NoProcessSandbox: - m = removeMountsWithPrefix(m, "/proc") - procMount := specs.Mount{ - Destination: "/proc", - Type: "bind", - Source: "/proc", - // NOTE: "rbind"+"ro" does not make /proc read-only recursively. - // So we keep maskedPath and readonlyPaths (although not mandatory for rootless mode) - Options: []string{"rbind"}, +func withBoundProc() oci.SpecOpts { + return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + s.Mounts = removeMountsWithPrefix(s.Mounts, "/proc") + procMount := specs.Mount{ + Destination: "/proc", + Type: "bind", + Source: "/proc", + // NOTE: "rbind"+"ro" does not make /proc read-only recursively. + // So we keep maskedPath and readonlyPaths (although not mandatory for rootless mode) + Options: []string{"rbind"}, + } + s.Mounts = append([]specs.Mount{procMount}, s.Mounts...) + + var maskedPaths []string + for _, s := range s.Linux.MaskedPaths { + if !hasPrefix(s, "/proc") { + maskedPaths = append(maskedPaths, s) } - m = append([]specs.Mount{procMount}, m...) - default: - return nil, errors.Errorf("unknown process mode: %v", processMode) } - return m, nil + s.Linux.MaskedPaths = maskedPaths + + var readonlyPaths []string + for _, s := range s.Linux.ReadonlyPaths { + if !hasPrefix(s, "/proc") { + readonlyPaths = append(readonlyPaths, s) + } + } + s.Linux.ReadonlyPaths = readonlyPaths + + return nil } } diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec.go b/vendor/github.com/moby/buildkit/executor/oci/spec.go index 9329fa90ba4db619a85eddfb8bdf3a0436ad26b5..44ad95e4bfb3777d802db5ec0f8e42f60f119b7a 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec.go @@ -1,6 +1,25 @@ package oci -// ProcMode configures PID namespaces +import ( + "context" + "path" + "sync" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/oci" + "github.com/containerd/continuity/fs" + "github.com/docker/docker/pkg/idtools" + "github.com/mitchellh/hashstructure" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/network" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// ProcessMode configures PID namespaces type ProcessMode int const ( @@ -11,3 +30,203 @@ const ( // NoProcessSandbox should be enabled only when the BuildKit is running in a container as an unprivileged user. 
NoProcessSandbox ) + +// Ideally we don't have to import whole containerd just for the default spec + +// GenerateSpec generates spec using containerd functionality. +// opts are ignored for s.Process, s.Hostname, and s.Mounts . +func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, processMode ProcessMode, idmap *idtools.IdentityMapping, opts ...oci.SpecOpts) (*specs.Spec, func(), error) { + c := &containers.Container{ + ID: id, + } + + // containerd/oci.GenerateSpec requires a namespace, which + // will be used to namespace specs.Linux.CgroupsPath if generated + if _, ok := namespaces.Namespace(ctx); !ok { + ctx = namespaces.WithNamespace(ctx, "buildkit") + } + + if mountOpts, err := generateMountOpts(resolvConf, hostsFile); err == nil { + opts = append(opts, mountOpts...) + } else { + return nil, nil, err + } + + if securityOpts, err := generateSecurityOpts(meta.SecurityMode); err == nil { + opts = append(opts, securityOpts...) + } else { + return nil, nil, err + } + + if processModeOpts, err := generateProcessModeOpts(processMode); err == nil { + opts = append(opts, processModeOpts...) + } else { + return nil, nil, err + } + + if idmapOpts, err := generateIDmapOpts(idmap); err == nil { + opts = append(opts, idmapOpts...) + } else { + return nil, nil, err + } + + hostname := defaultHostname + if meta.Hostname != "" { + hostname = meta.Hostname + } + + opts = append(opts, + oci.WithProcessArgs(meta.Args...), + oci.WithEnv(meta.Env), + oci.WithProcessCwd(meta.Cwd), + oci.WithNewPrivileges, + oci.WithHostname(hostname), + ) + + s, err := oci.GenerateSpec(ctx, nil, c, opts...) + if err != nil { + return nil, nil, err + } + + // set the networking information on the spec + if err := namespace.Set(s); err != nil { + return nil, nil, err + } + + s.Process.Rlimits = nil // reset open files limit + + sm := &submounts{} + + var releasers []func() error + releaseAll := func() { + sm.cleanup() + for _, f := range releasers { + f() + } + } + + for _, m := range mounts { + if m.Src == nil { + return nil, nil, errors.Errorf("mount %s has no source", m.Dest) + } + mountable, err := m.Src.Mount(ctx, m.Readonly) + if err != nil { + releaseAll() + return nil, nil, errors.Wrapf(err, "failed to mount %s", m.Dest) + } + mounts, release, err := mountable.Mount() + if err != nil { + releaseAll() + return nil, nil, errors.WithStack(err) + } + releasers = append(releasers, release) + for _, mount := range mounts { + mount, err = sm.subMount(mount, m.Selector) + if err != nil { + releaseAll() + return nil, nil, err + } + s.Mounts = append(s.Mounts, specs.Mount{ + Destination: m.Dest, + Type: mount.Type, + Source: mount.Source, + Options: mount.Options, + }) + } + } + + return s, releaseAll, nil +} + +type mountRef struct { + mount mount.Mount + unmount func() error +} + +type submounts struct { + m map[uint64]mountRef +} + +func (s *submounts) subMount(m mount.Mount, subPath string) (mount.Mount, error) { + if path.Join("/", subPath) == "/" { + return m, nil + } + if s.m == nil { + s.m = map[uint64]mountRef{} + } + h, err := hashstructure.Hash(m, nil) + if err != nil { + return mount.Mount{}, nil + } + if mr, ok := s.m[h]; ok { + sm, err := sub(mr.mount, subPath) + if err != nil { + return mount.Mount{}, nil + } + return sm, nil + } + + lm := snapshot.LocalMounterWithMounts([]mount.Mount{m}) + + mp, err := lm.Mount() + if err != nil { + return mount.Mount{}, err + } + + opts := []string{"rbind"} + for _, opt := range 
m.Options { + if opt == "ro" { + opts = append(opts, opt) + } + } + + s.m[h] = mountRef{ + mount: mount.Mount{ + Source: mp, + Type: "bind", + Options: opts, + }, + unmount: lm.Unmount, + } + + sm, err := sub(s.m[h].mount, subPath) + if err != nil { + return mount.Mount{}, err + } + return sm, nil +} + +func (s *submounts) cleanup() { + var wg sync.WaitGroup + wg.Add(len(s.m)) + for _, m := range s.m { + func(m mountRef) { + go func() { + m.unmount() + wg.Done() + }() + }(m) + } + wg.Wait() +} + +func sub(m mount.Mount, subPath string) (mount.Mount, error) { + src, err := fs.RootPath(m.Source, subPath) + if err != nil { + return mount.Mount{}, err + } + m.Source = src + return m, nil +} + +func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping { + var ids []specs.LinuxIDMapping + for _, item := range s { + ids = append(ids, specs.LinuxIDMapping{ + HostID: uint32(item.HostID), + ContainerID: uint32(item.ContainerID), + Size: uint32(item.Size), + }) + } + return ids +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go index 8ab4fb47077daddc052c70f9e95c2981e23ec392..de36195c3315e74a0834e899cf2a8ca1c900fd24 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go @@ -3,252 +3,56 @@ package oci import ( - "context" - "path" - "sync" - - "github.com/containerd/containerd/containers" "github.com/containerd/containerd/contrib/seccomp" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" - "github.com/containerd/continuity/fs" "github.com/docker/docker/pkg/idtools" - "github.com/mitchellh/hashstructure" - "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/entitlements/security" - "github.com/moby/buildkit/util/network" "github.com/moby/buildkit/util/system" specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" ) -// Ideally we don't have to import whole containerd just for the default spec - -// GenerateSpec generates spec using containerd functionality. -// opts are ignored for s.Process, s.Hostname, and s.Mounts . -func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, processMode ProcessMode, idmap *idtools.IdentityMapping, opts ...oci.SpecOpts) (*specs.Spec, func(), error) { - c := &containers.Container{ - ID: id, - } - _, ok := namespaces.Namespace(ctx) - if !ok { - ctx = namespaces.WithNamespace(ctx, "buildkit") - } - if meta.SecurityMode == pb.SecurityMode_INSECURE { - opts = append(opts, security.WithInsecureSpec()) - } else if system.SeccompSupported() && meta.SecurityMode == pb.SecurityMode_SANDBOX { - opts = append(opts, seccomp.WithDefaultProfile()) - } - - switch processMode { - case NoProcessSandbox: - // Mount for /proc is replaced in GetMounts() - opts = append(opts, - oci.WithHostNamespace(specs.PIDNamespace)) - // TODO(AkihiroSuda): Configure seccomp to disable ptrace (and prctl?) explicitly - } - - // Note that containerd.GenerateSpec is namespaced so as to make - // specs.Linux.CgroupsPath namespaced - s, err := oci.GenerateSpec(ctx, nil, c, opts...) 
- if err != nil { - return nil, nil, err - } - // set the networking information on the spec - namespace.Set(s) - - s.Process.Args = meta.Args - s.Process.Env = meta.Env - s.Process.Cwd = meta.Cwd - s.Process.Rlimits = nil // reset open files limit - s.Process.NoNewPrivileges = false // reset nonewprivileges - s.Hostname = "buildkitsandbox" - - s.Mounts, err = GetMounts(ctx, - withProcessMode(processMode), +func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) { + return []oci.SpecOpts{ + // https://github.com/moby/buildkit/issues/429 + withRemovedMount("/run"), withROBind(resolvConf, "/etc/resolv.conf"), withROBind(hostsFile, "/etc/hosts"), - ) - if err != nil { - return nil, nil, err - } - - s.Mounts = append(s.Mounts, specs.Mount{ - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{"ro", "nosuid", "noexec", "nodev"}, - }) - - if processMode == NoProcessSandbox { - var maskedPaths []string - for _, s := range s.Linux.MaskedPaths { - if !hasPrefix(s, "/proc") { - maskedPaths = append(maskedPaths, s) - } - } - s.Linux.MaskedPaths = maskedPaths - var readonlyPaths []string - for _, s := range s.Linux.ReadonlyPaths { - if !hasPrefix(s, "/proc") { - readonlyPaths = append(readonlyPaths, s) - } - } - s.Linux.ReadonlyPaths = readonlyPaths - } - - if meta.SecurityMode == pb.SecurityMode_INSECURE { - if err = oci.WithWriteableCgroupfs(ctx, nil, c, s); err != nil { - return nil, nil, err - } - if err = oci.WithWriteableSysfs(ctx, nil, c, s); err != nil { - return nil, nil, err - } - } - - if idmap != nil { - s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{ - Type: specs.UserNamespace, - }) - s.Linux.UIDMappings = specMapping(idmap.UIDs()) - s.Linux.GIDMappings = specMapping(idmap.GIDs()) - } - - sm := &submounts{} - - var releasers []func() error - releaseAll := func() { - sm.cleanup() - for _, f := range releasers { - f() - } - } - - for _, m := range mounts { - if m.Src == nil { - return nil, nil, errors.Errorf("mount %s has no source", m.Dest) - } - mountable, err := m.Src.Mount(ctx, m.Readonly) - if err != nil { - releaseAll() - return nil, nil, errors.Wrapf(err, "failed to mount %s", m.Dest) - } - mounts, release, err := mountable.Mount() - if err != nil { - releaseAll() - return nil, nil, errors.WithStack(err) - } - releasers = append(releasers, release) - for _, mount := range mounts { - mount, err = sm.subMount(mount, m.Selector) - if err != nil { - releaseAll() - return nil, nil, err - } - s.Mounts = append(s.Mounts, specs.Mount{ - Destination: m.Dest, - Type: mount.Type, - Source: mount.Source, - Options: mount.Options, - }) - } - } - - return s, releaseAll, nil -} - -type mountRef struct { - mount mount.Mount - unmount func() error + withCGroup(), + }, nil } -type submounts struct { - m map[uint64]mountRef +// generateSecurityOpts may affect mounts, so must be called after generateMountOpts +func generateSecurityOpts(mode pb.SecurityMode) ([]oci.SpecOpts, error) { + if mode == pb.SecurityMode_INSECURE { + return []oci.SpecOpts{ + security.WithInsecureSpec(), + oci.WithWriteableCgroupfs, + oci.WithWriteableSysfs, + }, nil + } else if system.SeccompSupported() && mode == pb.SecurityMode_SANDBOX { + return []oci.SpecOpts{seccomp.WithDefaultProfile()}, nil + } + return nil, nil } -func (s *submounts) subMount(m mount.Mount, subPath string) (mount.Mount, error) { - if path.Join("/", subPath) == "/" { - return m, nil - } - if s.m == nil { - s.m = map[uint64]mountRef{} - } - h, err := hashstructure.Hash(m, nil) 
- if err != nil { - return mount.Mount{}, nil - } - if mr, ok := s.m[h]; ok { - sm, err := sub(mr.mount, subPath) - if err != nil { - return mount.Mount{}, nil - } - return sm, nil - } - - lm := snapshot.LocalMounterWithMounts([]mount.Mount{m}) - - mp, err := lm.Mount() - if err != nil { - return mount.Mount{}, err - } - - opts := []string{"rbind"} - for _, opt := range m.Options { - if opt == "ro" { - opts = append(opts, opt) - } - } - - s.m[h] = mountRef{ - mount: mount.Mount{ - Source: mp, - Type: "bind", - Options: opts, - }, - unmount: lm.Unmount, - } - - sm, err := sub(s.m[h].mount, subPath) - if err != nil { - return mount.Mount{}, err - } - return sm, nil -} - -func (s *submounts) cleanup() { - var wg sync.WaitGroup - wg.Add(len(s.m)) - for _, m := range s.m { - func(m mountRef) { - go func() { - m.unmount() - wg.Done() - }() - }(m) - } - wg.Wait() -} - -func sub(m mount.Mount, subPath string) (mount.Mount, error) { - src, err := fs.RootPath(m.Source, subPath) - if err != nil { - return mount.Mount{}, err +// generateProcessModeOpts may affect mounts, so must be called after generateMountOpts +func generateProcessModeOpts(mode ProcessMode) ([]oci.SpecOpts, error) { + if mode == NoProcessSandbox { + return []oci.SpecOpts{ + oci.WithHostNamespace(specs.PIDNamespace), + withBoundProc(), + }, nil + // TODO(AkihiroSuda): Configure seccomp to disable ptrace (and prctl?) explicitly } - m.Source = src - return m, nil + return nil, nil } -func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping { - var ids []specs.LinuxIDMapping - for _, item := range s { - ids = append(ids, specs.LinuxIDMapping{ - HostID: uint32(item.HostID), - ContainerID: uint32(item.ContainerID), - Size: uint32(item.Size), - }) +func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) { + if idmap == nil { + return nil, nil } - return ids + return []oci.SpecOpts{ + oci.WithUserNamespace(specMapping(idmap.UIDs()), specMapping(idmap.GIDs())), + }, nil } diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go b/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go new file mode 100644 index 0000000000000000000000000000000000000000..4589c9d98e016ec4fd065c114cdeda5c19511a92 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go @@ -0,0 +1,42 @@ +// +build windows + +package oci + +import ( + "github.com/containerd/containerd/contrib/seccomp" + "github.com/containerd/containerd/oci" + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/system" + "github.com/pkg/errors" +) + +func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) { + return nil, nil +} + +// generateSecurityOpts may affect mounts, so must be called after generateMountOpts +func generateSecurityOpts(mode pb.SecurityMode) ([]oci.SpecOpts, error) { + if mode == pb.SecurityMode_INSECURE { + return nil, errors.New("no support for running in insecure mode on Windows") + } else if system.SeccompSupported() && mode == pb.SecurityMode_SANDBOX { + // TODO: Can LCOW support seccomp? Does that even make sense? 
+ return []oci.SpecOpts{seccomp.WithDefaultProfile()}, nil + } + return nil, nil +} + +// generateProcessModeOpts may affect mounts, so must be called after generateMountOpts +func generateProcessModeOpts(mode ProcessMode) ([]oci.SpecOpts, error) { + if mode == NoProcessSandbox { + return nil, errors.New("no support for NoProcessSandbox on Windows") + } + return nil, nil +} + +func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) { + if idmap == nil { + return nil, nil + } + return nil, errors.New("no support for IdentityMapping on Windows") +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/user.go b/vendor/github.com/moby/buildkit/executor/oci/user.go index 4f7cb107ef1261371ed9f91212db786ec20012a7..e49eb8e4d33e875c0d3afaaa0ca88e1ffe576c9e 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/user.go +++ b/vendor/github.com/moby/buildkit/executor/oci/user.go @@ -14,7 +14,7 @@ import ( "github.com/pkg/errors" ) -func GetUser(ctx context.Context, root, username string) (uint32, uint32, []uint32, error) { +func GetUser(root, username string) (uint32, uint32, []uint32, error) { // fast path from uid/gid if uid, gid, err := ParseUIDGID(username); err == nil { return uid, gid, nil, nil diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go index 068929c6635d4292390b7a9e972876055bdac51b..035edfd295046a20f51286331bc52a35b5038942 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go @@ -12,15 +12,16 @@ import ( "syscall" "time" + "github.com/containerd/containerd" "github.com/containerd/containerd/mount" containerdoci "github.com/containerd/containerd/oci" "github.com/containerd/continuity/fs" runc "github.com/containerd/go-runc" "github.com/docker/docker/pkg/idtools" - "github.com/moby/buildkit/cache" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/network" rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv" @@ -52,7 +53,6 @@ var defaultCommandCandidates = []string{"buildkit-runc", "runc"} type runcExecutor struct { runc *runc.Runc root string - cmd string cgroupParent string rootless bool networkProviders map[pb.NetMode]network.Provider @@ -103,15 +103,16 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex os.RemoveAll(filepath.Join(root, "resolv.conf")) runtime := &runc.Runc{ - Command: cmd, - Log: filepath.Join(root, "runc-log.json"), - LogFormat: runc.JSON, - PdeathSignal: syscall.SIGKILL, // this can still leak the process - Setpgid: true, + Command: cmd, + Log: filepath.Join(root, "runc-log.json"), + LogFormat: runc.JSON, + Setpgid: true, // we don't execute runc with --rootless=(true|false) explicitly, // so as to support non-runc runtimes } + updateRuncFieldsForHostOS(runtime) + w := &runcExecutor{ runc: runtime, root: root, @@ -128,7 +129,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex return w, nil } -func (w *runcExecutor) Run(ctx context.Context, id string, root cache.Mountable, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { +func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, 
started chan<- struct{}) (err error) { meta := process.Meta startedOnce := sync.Once{} @@ -168,7 +169,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root cache.Mountable, return err } - hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, w.idmap) + hostsFile, clean, err := oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, w.idmap, meta.Hostname) if err != nil { return err } @@ -176,7 +177,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root cache.Mountable, defer clean() } - mountable, err := root.Mount(ctx, false) + mountable, err := root.Src.Mount(ctx, false) if err != nil { return err } @@ -213,7 +214,9 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root cache.Mountable, } defer mount.Unmount(rootFSPath, 0) - uid, gid, sgids, err := oci.GetUser(ctx, rootFSPath, meta.User) + defer executor.MountStubsCleaner(rootFSPath, mounts)() + + uid, gid, sgids, err := oci.GetUser(rootFSPath, meta.User) if err != nil { return err } @@ -258,7 +261,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root cache.Mountable, defer cleanup() spec.Root.Path = rootFSPath - if _, ok := root.(cache.ImmutableRef); ok { // TODO: pass in with mount, not ref type + if root.Readonly { spec.Root.Readonly = true } @@ -323,28 +326,37 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root cache.Mountable, close(started) }) } - status, err := w.runc.Run(runCtx, id, bundle, &runc.CreateOpts{ - IO: &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, - NoPivot: w.noPivot, - }) + + err = w.run(runCtx, id, bundle, process) close(ended) + return exitError(ctx, err) +} - if status != 0 || err != nil { - if err == nil { - err = errors.Errorf("exit code: %d", status) +func exitError(ctx context.Context, err error) error { + if err != nil { + exitErr := &errdefs.ExitError{ + ExitCode: containerd.UnknownExitStatus, + Err: err, + } + var runcExitError *runc.ExitError + if errors.As(err, &runcExitError) { + exitErr = &errdefs.ExitError{ + ExitCode: uint32(runcExitError.Status), + } } select { case <-ctx.Done(): - return errors.Wrapf(ctx.Err(), err.Error()) + exitErr.Err = errors.Wrapf(ctx.Err(), exitErr.Error()) + return exitErr default: - return stack.Enable(err) + return stack.Enable(exitErr) } } return nil } -func (w *runcExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) error { +func (w *runcExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) (err error) { // first verify the container is running, if we get an error assume the container // is in the process of being created and check again every 100ms or until // context is canceled. 
@@ -386,7 +398,7 @@ func (w *runcExecutor) Exec(ctx context.Context, id string, process executor.Pro } if process.Meta.User != "" { - uid, gid, sgids, err := oci.GetUser(ctx, state.Rootfs, process.Meta.User) + uid, gid, sgids, err := oci.GetUser(state.Rootfs, process.Meta.User) if err != nil { return err } @@ -407,9 +419,8 @@ func (w *runcExecutor) Exec(ctx context.Context, id string, process executor.Pro spec.Process.Env = process.Meta.Env } - return w.runc.Exec(ctx, id, *spec.Process, &runc.ExecOpts{ - IO: &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, - }) + err = w.exec(ctx, id, state.Bundle, spec.Process, process) + return exitError(ctx, err) } type forwardIO struct { diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go new file mode 100644 index 0000000000000000000000000000000000000000..3751b9009caa510f87bb2cbbe664c872288b3a1d --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go @@ -0,0 +1,36 @@ +// +build !linux + +package runcexecutor + +import ( + "context" + + runc "github.com/containerd/go-runc" + "github.com/moby/buildkit/executor" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +var unsupportedConsoleError = errors.New("tty for runc is only supported on linux") + +func updateRuncFieldsForHostOS(runtime *runc.Runc) {} + +func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo) error { + if process.Meta.Tty { + return unsupportedConsoleError + } + _, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{ + IO: &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, + NoPivot: w.noPivot, + }) + return err +} + +func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo) error { + if process.Meta.Tty { + return unsupportedConsoleError + } + return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{ + IO: &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, + }) +} diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..b01040ce9f0d8a89264fdf6bf7aac7e868609ab0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go @@ -0,0 +1,160 @@ +package runcexecutor + +import ( + "context" + "io" + "os" + "syscall" + "time" + + "github.com/containerd/console" + runc "github.com/containerd/go-runc" + "github.com/docker/docker/pkg/signal" + "github.com/moby/buildkit/executor" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +func updateRuncFieldsForHostOS(runtime *runc.Runc) { + // PdeathSignal only supported on unix platforms + runtime.PdeathSignal = syscall.SIGKILL // this can still leak the process +} + +func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo) error { + return w.callWithIO(ctx, id, bundle, process, func(ctx context.Context, started chan<- int, io runc.IO) error { + _, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{ + NoPivot: w.noPivot, + Started: started, + IO: io, + }) + return err + }) +} + +func (w *runcExecutor) exec(ctx context.Context, id, bundle string, 
specsProcess *specs.Process, process executor.ProcessInfo) error { + return w.callWithIO(ctx, id, bundle, process, func(ctx context.Context, started chan<- int, io runc.IO) error { + return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{ + Started: started, + IO: io, + }) + }) +} + +type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error + +func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, process executor.ProcessInfo, call runcCall) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if !process.Meta.Tty { + return call(ctx, nil, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}) + } + + ptm, ptsName, err := console.NewPty() + if err != nil { + return err + } + + pts, err := os.OpenFile(ptsName, os.O_RDWR|syscall.O_NOCTTY, 0) + if err != nil { + ptm.Close() + return err + } + + eg, ctx := errgroup.WithContext(ctx) + + defer func() { + if process.Stdin != nil { + process.Stdin.Close() + } + pts.Close() + ptm.Close() + cancel() // this will shutdown resize loop + err := eg.Wait() + if err != nil { + logrus.Warningf("error while shutting down tty io: %s", err) + } + }() + + if process.Stdin != nil { + eg.Go(func() error { + _, err := io.Copy(ptm, process.Stdin) + // stdin might be a pipe, so this is like EOF + if errors.Is(err, io.ErrClosedPipe) { + return nil + } + return err + }) + } + + if process.Stdout != nil { + eg.Go(func() error { + _, err := io.Copy(process.Stdout, ptm) + // ignore `read /dev/ptmx: input/output error` when ptm is closed + var ptmClosedError *os.PathError + if errors.As(err, &ptmClosedError) { + if ptmClosedError.Op == "read" && + ptmClosedError.Path == "/dev/ptmx" && + ptmClosedError.Err == syscall.EIO { + return nil + } + } + return err + }) + } + + started := make(chan int, 1) + + eg.Go(func() error { + startedCtx, timeout := context.WithTimeout(ctx, 10*time.Second) + defer timeout() + var runcProcess *os.Process + select { + case <-startedCtx.Done(): + return errors.New("runc started message never received") + case pid, ok := <-started: + if !ok { + return errors.New("runc process failed to send pid") + } + runcProcess, err = os.FindProcess(pid) + if err != nil { + return errors.Wrapf(err, "unable to find runc process for pid %d", pid) + } + defer runcProcess.Release() + } + + for { + select { + case <-ctx.Done(): + return nil + case resize := <-process.Resize: + err = ptm.Resize(console.WinSize{ + Height: uint16(resize.Rows), + Width: uint16(resize.Cols), + }) + if err != nil { + logrus.Errorf("failed to resize ptm: %s", err) + } + err = runcProcess.Signal(signal.SIGWINCH) + if err != nil { + logrus.Errorf("failed to send SIGWINCH to process: %s", err) + } + } + } + }) + + runcIO := &forwardIO{} + if process.Stdin != nil { + runcIO.stdin = pts + } + if process.Stdout != nil { + runcIO.stdout = pts + } + if process.Stderr != nil { + runcIO.stderr = pts + } + + return call(ctx, started, runcIO) +} diff --git a/vendor/github.com/moby/buildkit/executor/stubs.go b/vendor/github.com/moby/buildkit/executor/stubs.go new file mode 100644 index 0000000000000000000000000000000000000000..2c13b13053a4ffdf1651ebd66ad00fda37a4273c --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/stubs.go @@ -0,0 +1,49 @@ +package executor + +import ( + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/containerd/continuity/fs" +) + +func MountStubsCleaner(dir string, mounts []Mount) func() { + names := []string{"/etc/resolv.conf", "/etc/hosts"} + + for _, m := range mounts 
{ + names = append(names, m.Dest) + } + + paths := make([]string, 0, len(names)) + + for _, p := range names { + p = filepath.Join("/", p) + if p == "/" { + continue + } + realPath, err := fs.RootPath(dir, p) + if err != nil { + continue + } + + _, err = os.Lstat(realPath) + if errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) { + paths = append(paths, realPath) + } + } + + return func() { + for _, p := range paths { + st, err := os.Lstat(p) + if err != nil { + continue + } + if st.Size() != 0 { + continue + } + os.Remove(p) + } + } +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go index 02e34eb63389a2391b6fdb2f25b8211fb7833da1..b428afd0e6b6e9bc3eb7ca591f7fa82e7698dd81 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go @@ -1,11 +1,16 @@ package exptypes -import specs "github.com/opencontainers/image-spec/specs-go/v1" +import ( + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) const ExporterImageConfigKey = "containerimage.config" const ExporterInlineCache = "containerimage.inlinecache" const ExporterPlatformsKey = "refs.platforms" +const EmptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1") + type Platforms struct { Platforms []Platform } diff --git a/vendor/github.com/moby/buildkit/exporter/local/export.go b/vendor/github.com/moby/buildkit/exporter/local/export.go index 28e32204f57c0570b692c20d309c342da5e82a16..d772776a9abdcf83d82e055f59032c52408a616c 100644 --- a/vendor/github.com/moby/buildkit/exporter/local/export.go +++ b/vendor/github.com/moby/buildkit/exporter/local/export.go @@ -51,7 +51,7 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID) + caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) if err != nil { return nil, err } @@ -70,7 +70,7 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, } defer os.RemoveAll(src) } else { - mount, err := ref.Mount(ctx, true) + mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID)) if err != nil { return err } diff --git a/vendor/github.com/moby/buildkit/exporter/tar/export.go b/vendor/github.com/moby/buildkit/exporter/tar/export.go index 0f635fc1b46c6d785367e23b0815eec20c51e530..79e98cd6a1396ecde611d286ee866cfe31786ae2 100644 --- a/vendor/github.com/moby/buildkit/exporter/tar/export.go +++ b/vendor/github.com/moby/buildkit/exporter/tar/export.go @@ -65,7 +65,7 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, } defers = append(defers, func() { os.RemoveAll(src) }) } else { - mount, err := ref.Mount(ctx, true) + mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID)) if err != nil { return nil, err } @@ -135,7 +135,7 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID) + caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go 
b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go index 6d959d9deb4c3f3fcbe807ae8c137b101d0be45a..f182ae96638c82f1deddd54134b325478f77a9d8 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go @@ -14,11 +14,11 @@ import ( "strings" "github.com/containerd/containerd/platforms" - "github.com/docker/docker/builder/dockerignore" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" + "github.com/moby/buildkit/frontend/dockerfile/dockerignore" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/gateway/client" gwpb "github.com/moby/buildkit/frontend/gateway/pb" @@ -53,16 +53,23 @@ const ( keyNameDockerfile = "dockerfilekey" keyContextSubDir = "contextsubdir" keyContextKeepGitDir = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" + keySyntax = "build-arg:BUILDKIT_SYNTAX" + keyHostname = "hostname" ) var httpPrefix = regexp.MustCompile(`^https?://`) -var gitUrlPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) +var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`) func Build(ctx context.Context, c client.Client) (*client.Result, error) { opts := c.BuildOpts().Opts caps := c.BuildOpts().LLBCaps gwcaps := c.BuildOpts().Caps + allowForward, capsError := validateCaps(opts["frontend.caps"]) + if !allowForward && capsError != nil { + return nil, capsError + } + marshalOpts := []llb.ConstraintsOpt{llb.WithCaps(caps)} localNameContext := DefaultLocalNameContext @@ -317,8 +324,14 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { } if _, ok := opts["cmdline"]; !ok { - ref, cmdline, loc, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)) - if ok { + if cmdline, ok := opts[keySyntax]; ok { + p := strings.SplitN(strings.TrimSpace(cmdline), " ", 2) + res, err := forwardGateway(ctx, c, p[0], cmdline) + if err != nil && len(errdefs.Sources(err)) == 0 { + return nil, errors.Wrapf(err, "failed with %s = %s", keySyntax, cmdline) + } + return res, err + } else if ref, cmdline, loc, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)); ok { res, err := forwardGateway(ctx, c, ref, cmdline) if err != nil && len(errdefs.Sources(err)) == 0 { return nil, wrapSource(err, sourceMap, loc) @@ -327,6 +340,14 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { } } + if capsError != nil { + return nil, capsError + } + + if res, ok, err := checkSubRequest(ctx, opts); ok { + return res, err + } + exportMap := len(targetPlatforms) > 1 if v := opts[keyMultiPlatform]; v != "" { @@ -375,6 +396,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { OverrideCopyImage: opts[keyOverrideCopyImage], LLBCaps: &caps, SourceMap: sourceMap, + Hostname: opts[keyHostname], }) if err != nil { @@ -512,7 +534,7 @@ func filter(opt map[string]string, key string) map[string]string { func detectGitContext(ref, gitContext string) (*llb.State, bool) { found := false - if httpPrefix.MatchString(ref) && gitUrlPathWithFragmentSuffix.MatchString(ref) { + if httpPrefix.MatchString(ref) && gitURLPathWithFragmentSuffix.MatchString(ref) { found = true } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/caps.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/caps.go new file mode 
100644 index 0000000000000000000000000000000000000000..279701154eac2959360084afb208702f2a8317c6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/caps.go @@ -0,0 +1,34 @@ +package builder + +import ( + "strings" + + "github.com/moby/buildkit/solver/errdefs" + "github.com/moby/buildkit/util/grpcerrors" + "github.com/moby/buildkit/util/stack" + "google.golang.org/grpc/codes" +) + +var enabledCaps = map[string]struct{}{ + "moby.buildkit.frontend.inputs": {}, + "moby.buildkit.frontend.subrequests": {}, +} + +func validateCaps(req string) (forward bool, err error) { + if req == "" { + return + } + caps := strings.Split(req, ",") + for _, c := range caps { + parts := strings.SplitN(c, "+", 2) + if _, ok := enabledCaps[parts[0]]; !ok { + err = stack.Enable(grpcerrors.WrapCode(errdefs.NewUnsupportedFrontendCapError(parts[0]), codes.Unimplemented)) + if strings.Contains(c, "+forward") { + forward = true + } else { + return false, err + } + } + } + return +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go new file mode 100644 index 0000000000000000000000000000000000000000..6d30b7b8cc1cf9d6ac8d13ab8189821a54d638a1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/subrequests.go @@ -0,0 +1,39 @@ +package builder + +import ( + "context" + "encoding/json" + + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/frontend/subrequests" + "github.com/moby/buildkit/solver/errdefs" +) + +func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Result, bool, error) { + req, ok := opts["requestid"] + if !ok { + return nil, false, nil + } + switch req { + case subrequests.RequestSubrequestsDescribe: + res, err := describe() + return res, true, err + default: + return nil, true, errdefs.NewUnsupportedSubrequestError(req) + } +} + +func describe() (*client.Result, error) { + all := []subrequests.Request{ + subrequests.SubrequestsDescribeDefinition, + } + dt, err := json.MarshalIndent(all, " ", "") + if err != nil { + return nil, err + } + res := client.NewResult() + res.Metadata = map[string][]byte{ + "result.json": dt, + } + return res, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go index 28c7efcaec20ca30f77ae753555b9330cd475ca8..5b3d52a6366a770f059b6916d90c74c71f7f95dd 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go @@ -62,6 +62,7 @@ type ConvertOpt struct { LLBCaps *apicaps.CapSet ContextLocalName string SourceMap *llb.SourceMap + Hostname string } func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) { @@ -94,11 +95,13 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, shlex := shell.NewLex(dockerfile.EscapeToken) - for _, metaArg := range metaArgs { - if metaArg.Value != nil { - *metaArg.Value, _ = shlex.ProcessWordWithMap(*metaArg.Value, metaArgsToMap(optMetaArgs)) + for _, cmd := range metaArgs { + for _, metaArg := range cmd.Args { + if metaArg.Value != nil { + *metaArg.Value, _ = shlex.ProcessWordWithMap(*metaArg.Value, metaArgsToMap(optMetaArgs)) + } + optMetaArgs = append(optMetaArgs, setKVValue(metaArg, opt.BuildArgs)) } - optMetaArgs = append(optMetaArgs, 
setKVValue(metaArg.KeyValuePairOptional, opt.BuildArgs)) } metaResolver := opt.MetaResolver @@ -314,7 +317,11 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, // make sure that PATH is always set if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok { - d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv) + var os string + if d.platform != nil { + os = d.platform.OS + } + d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(os)) } // initialize base metadata from image conf @@ -322,6 +329,9 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, k, v := parseKeyValue(env) d.state = d.state.AddEnv(k, v) } + if opt.Hostname != "" { + d.state = d.state.Hostname(opt.Hostname) + } if d.image.Config.WorkingDir != "" { if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil { return nil, nil, parser.WithLocation(err, d.stage.Location) @@ -1072,26 +1082,30 @@ func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error { } func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error { - commitStr := "ARG " + c.Key - buildArg := setKVValue(c.KeyValuePairOptional, buildArgValues) + commitStrs := make([]string, 0, len(c.Args)) + for _, arg := range c.Args { + buildArg := setKVValue(arg, buildArgValues) - if c.Value != nil { - commitStr += "=" + *c.Value - } - if buildArg.Value == nil { - for _, ma := range metaArgs { - if ma.Key == buildArg.Key { - buildArg.Value = ma.Value + commitStr := arg.Key + if arg.Value != nil { + commitStr += "=" + *arg.Value + } + commitStrs = append(commitStrs, commitStr) + if buildArg.Value == nil { + for _, ma := range metaArgs { + if ma.Key == buildArg.Key { + buildArg.Value = ma.Value + } } } - } - if buildArg.Value != nil { - d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value) - } + if buildArg.Value != nil { + d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value) + } - d.buildArgs = append(d.buildArgs, buildArg) - return commitToHistory(&d.image, commitStr, false, nil) + d.buildArgs = append(d.buildArgs, buildArg) + } + return commitToHistory(&d.image, "ARG "+strings.Join(commitStrs, " "), false, nil) } func pathRelativeToWorkingDir(s llb.State, p string) (string, error) { @@ -1308,15 +1322,15 @@ func proxyEnvFromBuildArgs(args map[string]string) *llb.ProxyEnv { isNil := true for k, v := range args { if strings.EqualFold(k, "http_proxy") { - pe.HttpProxy = v + pe.HTTPProxy = v isNil = false } if strings.EqualFold(k, "https_proxy") { - pe.HttpsProxy = v + pe.HTTPSProxy = v isNil = false } if strings.EqualFold(k, "ftp_proxy") { - pe.FtpProxy = v + pe.FTPProxy = v isNil = false } if strings.EqualFold(k, "no_proxy") { @@ -1339,7 +1353,7 @@ func withShell(img Image, args []string) []string { if len(img.Config.Shell) > 0 { shell = append([]string{}, img.Config.Shell...) 
} else { - shell = defaultShell() + shell = defaultShell(img.OS) } return append(shell, strings.Join(args, " ")) } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go deleted file mode 100644 index 5f0cd086023a511e201ffbd056db4db7df8ff368..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunmount.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !dfrunmount - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" -) - -func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool { - return false -} - -func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) { - return nil, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go deleted file mode 100644 index d75470215fd360ddd554ef591f80394f6898ca20..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nosecrets.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build dfrunmount,!dfsecrets - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) { - return nil, errors.Errorf("secret mounts not allowed") -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go deleted file mode 100644 index 8b8afdc38c10d7e6e18cd383efb15646c5395c0b..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_nossh.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build dfrunmount,!dfssh - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/pkg/errors" -) - -func dispatchSSH(m *instructions.Mount) (llb.RunOption, error) { - return nil, errors.Errorf("ssh mounts not allowed") -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go index 1be4849e2ac068e5f49fb663f968993c177cba13..8cd928b2b630025e78d6b264be1a8d5a85708ee1 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go @@ -1,5 +1,3 @@ -// +build dfrunmount - package dockerfile2llb import ( diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go index 59c055a02d4349ff0cf11009bedc1143a70eeee2..2c88a5e4f7e7fd630e889f99718951c0c4c38488 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_secrets.go @@ -1,5 +1,3 @@ -// +build dfsecrets - package dockerfile2llb 
import ( diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go index a29b5350d72fca7dbba568130c43266e2f07c160..b55659d978835f6d177075e23f4f5f8ed5261876 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_ssh.go @@ -1,5 +1,3 @@ -// +build dfssh - package dockerfile2llb import ( diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell.go new file mode 100644 index 0000000000000000000000000000000000000000..58b17372bd44d45705dff83bc1b640f6ef4f62f6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell.go @@ -0,0 +1,8 @@ +package dockerfile2llb + +func defaultShell(os string) []string { + if os == "windows" { + return []string{"cmd", "/S", "/C"} + } + return []string{"/bin/sh", "-c"} +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go deleted file mode 100644 index b5d541d1f5db85871c60438f30c818393f4fde1e..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package dockerfile2llb - -func defaultShell() []string { - return []string{"/bin/sh", "-c"} -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go deleted file mode 100644 index 7693e050863a4d159b89f31862e42204462aaed9..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/defaultshell_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build windows - -package dockerfile2llb - -func defaultShell() []string { - return []string{"cmd", "/S", "/C"} -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go index 55e9add23363f30e2126f6a5469c06682112c822..6eb4cf6e7ae05424cfd13d59ac2a3e05cac39b4d 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go @@ -74,6 +74,6 @@ func emptyImage(platform specs.Platform) Image { } img.RootFS.Type = "layers" img.Config.WorkingDir = "/" - img.Config.Env = []string{"PATH=" + system.DefaultPathEnv} + img.Config.Env = []string{"PATH=" + system.DefaultPathEnv(platform.OS)} return img } diff --git a/builder/dockerignore/dockerignore.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerignore/dockerignore.go similarity index 94% rename from builder/dockerignore/dockerignore.go rename to vendor/github.com/moby/buildkit/frontend/dockerfile/dockerignore/dockerignore.go index 57f224afc8a32edb18ccd91574204149f4041331..cc22381339f65c8072a572ec17229966c49076d1 100644 --- a/builder/dockerignore/dockerignore.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerignore/dockerignore.go @@ -1,4 +1,4 @@ -package dockerignore // import "github.com/docker/docker/builder/dockerignore" +package dockerignore import 
( "bufio" diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go index 4a4146a4334426786f7d33dfdd88111044efc603..e6027445ac1d1b1b7e2ddd0d837542342f93e597 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go @@ -21,8 +21,9 @@ func (kvp *KeyValuePair) String() string { // KeyValuePairOptional is the same as KeyValuePair but Value is optional type KeyValuePairOptional struct { - Key string - Value *string + Key string + Value *string + Comment string } func (kvpo *KeyValuePairOptional) ValueString() string { @@ -380,22 +381,25 @@ func (c *StopSignalCommand) CheckPlatform(platform string) error { // Dockerfile author may optionally set a default value of this variable. type ArgCommand struct { withNameAndCode - KeyValuePairOptional + Args []KeyValuePairOptional } // Expand variables func (c *ArgCommand) Expand(expander SingleWordExpander) error { - p, err := expander(c.Key) - if err != nil { - return err - } - c.Key = p - if c.Value != nil { - p, err = expander(*c.Value) + for i, v := range c.Args { + p, err := expander(v.Key) if err != nil { return err } - c.Value = &p + v.Key = p + if v.Value != nil { + p, err = expander(*v.Value) + if err != nil { + return err + } + v.Value = &p + } + c.Args[i] = v } return nil } @@ -416,6 +420,7 @@ type Stage struct { SourceCode string Platform string Location []parser.Range + Comment string } // AddCommand to the stage diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go deleted file mode 100644 index 58780648db82fbea9c8d46a2d8b1700dcef0b70e..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nosecrets.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !dfsecrets - -package instructions - -func isSecretMountsSupported() bool { - return false -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go deleted file mode 100644 index a131a273c3ef2e404aebb37708b29148edabfd21..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_nossh.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !dfssh - -package instructions - -func isSSHMountsSupported() bool { - return false -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go index 442877d8662cbf0c567224a440fc150c9e591ef3..b0b0f0103b5aeb6299efeecba5e6172738a16f6d 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go @@ -1,5 +1,3 @@ -// +build dfrunmount - package instructions import ( diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go index 6cce1191d73544296be0978857c7a397ad766b62..2b4140b72ae791ed3be3fcf3b86e974ddd536dda 100644 --- 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_secrets.go @@ -1,5 +1,3 @@ -// +build dfsecrets - package instructions func isSecretMountsSupported() bool { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go index 0b94a564470b428df082a2c759986e662c9a2cfe..0e4e5f38c72872f6f52afeb43a96b41b24d6e480 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_ssh.go @@ -1,5 +1,3 @@ -// +build dfssh - package instructions func isSSHMountsSupported() bool { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go index 1be9d7b2999d5ce9493de3c72dd51144baf71a72..83cab66944dd930a0ec16a2cad5a4aa5706a5888 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go @@ -22,6 +22,7 @@ type parseRequest struct { flags *BFlags original string location []parser.Range + comments []string } var parseRunPreHooks []func(*RunCommand, parseRequest) error @@ -50,6 +51,7 @@ func newParseRequestFromNode(node *parser.Node) parseRequest { original: node.Original, flags: NewBFlagsWithArgs(node.Flags), location: node.Location(), + comments: node.PrevComment, } } @@ -289,6 +291,7 @@ func parseFrom(req parseRequest) (*Stage, error) { Commands: []Command{}, Platform: flPlatform.Value, Location: req.location, + Comment: getComment(req.comments, stageName), }, nil } @@ -579,33 +582,38 @@ func parseStopSignal(req parseRequest) (*StopSignalCommand, error) { } func parseArg(req parseRequest) (*ArgCommand, error) { - if len(req.args) != 1 { - return nil, errExactlyOneArgument("ARG") + if len(req.args) < 1 { + return nil, errAtLeastOneArgument("ARG") } - kvpo := KeyValuePairOptional{} + pairs := make([]KeyValuePairOptional, len(req.args)) - arg := req.args[0] - // 'arg' can just be a name or name-value pair. Note that this is different - // from 'env' that handles the split of name and value at the parser level. - // The reason for doing it differently for 'arg' is that we support just - // defining an arg and not assign it a value (while 'env' always expects a - // name-value pair). If possible, it will be good to harmonize the two. - if strings.Contains(arg, "=") { - parts := strings.SplitN(arg, "=", 2) - if len(parts[0]) == 0 { - return nil, errBlankCommandNames("ARG") - } + for i, arg := range req.args { + kvpo := KeyValuePairOptional{} - kvpo.Key = parts[0] - kvpo.Value = &parts[1] - } else { - kvpo.Key = arg + // 'arg' can just be a name or name-value pair. Note that this is different + // from 'env' that handles the split of name and value at the parser level. + // The reason for doing it differently for 'arg' is that we support just + // defining an arg and not assign it a value (while 'env' always expects a + // name-value pair). If possible, it will be good to harmonize the two. 
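The parseArg rewrite in this hunk accepts one or more arguments per ARG instruction, each either a bare name or a name=value pair, and attaches any preceding comment to the matching key. A minimal, self-contained sketch of the per-argument split, using a local stand-in type and illustrative values rather than the vendored instructions package:

```go
package main

import (
	"fmt"
	"strings"
)

// pair is a local stand-in for instructions.KeyValuePairOptional.
type pair struct {
	key   string
	value *string
}

// splitArgs mirrors the per-argument handling in parseArg: each argument is
// either a bare name or a name=value pair split on the first "=".
func splitArgs(args []string) ([]pair, error) {
	pairs := make([]pair, len(args))
	for i, arg := range args {
		p := pair{key: arg}
		if strings.Contains(arg, "=") {
			parts := strings.SplitN(arg, "=", 2)
			if parts[0] == "" {
				return nil, fmt.Errorf("ARG names can not be blank")
			}
			p.key, p.value = parts[0], &parts[1]
		}
		pairs[i] = p
	}
	return pairs, nil
}

func main() {
	// e.g. the Dockerfile line: ARG VERSION=1.0 DEBUG
	pairs, _ := splitArgs([]string{"VERSION=1.0", "DEBUG"})
	for _, p := range pairs {
		if p.value != nil {
			fmt.Printf("%s (default %q)\n", p.key, *p.value)
		} else {
			fmt.Printf("%s (no default)\n", p.key)
		}
	}
}
```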
+ if strings.Contains(arg, "=") { + parts := strings.SplitN(arg, "=", 2) + if len(parts[0]) == 0 { + return nil, errBlankCommandNames("ARG") + } + + kvpo.Key = parts[0] + kvpo.Value = &parts[1] + } else { + kvpo.Key = arg + } + kvpo.Comment = getComment(req.comments, kvpo.Key) + pairs[i] = kvpo } return &ArgCommand{ - KeyValuePairOptional: kvpo, - withNameAndCode: newWithNameAndCode(req), + Args: pairs, + withNameAndCode: newWithNameAndCode(req), }, nil } @@ -650,3 +658,15 @@ func errBlankCommandNames(command string) error { func errTooManyArguments(command string) error { return errors.Errorf("Bad input to %s, too many arguments", command) } + +func getComment(comments []string, name string) string { + if name == "" { + return "" + } + for _, line := range comments { + if strings.HasPrefix(line, name+" ") { + return strings.TrimPrefix(line, name+" ") + } + } + return "" +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go index 441824c8b50465645d7190f4f7fe72276eeb361e..c0d0a55d1224096c44bea1cb089093b977e04d2c 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go @@ -40,7 +40,7 @@ func parseSubCommand(rest string, d *directives) (*Node, map[string]bool, error) return nil, nil, nil } - child, err := newNodeFromLine(rest, d) + child, err := newNodeFromLine(rest, d, nil) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go index dc6d17848a12391e54ee5b093caabbe295c46441..1e30e208bf992b6cf7cda36b788ca08df80c5103 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go @@ -28,14 +28,15 @@ import ( // works a little more effectively than a "proper" parse tree for our needs. // type Node struct { - Value string // actual content - Next *Node // the next item in the current sexp - Children []*Node // the children of this sexp - Attributes map[string]bool // special attributes for this node - Original string // original line used before parsing - Flags []string // only top Node should have this set - StartLine int // the line in the original dockerfile where the node begins - EndLine int // the line in the original dockerfile where the node ends + Value string // actual content + Next *Node // the next item in the current sexp + Children []*Node // the children of this sexp + Attributes map[string]bool // special attributes for this node + Original string // original line used before parsing + Flags []string // only top Node should have this set + StartLine int // the line in the original dockerfile where the node begins + EndLine int // the line in the original dockerfile where the node ends + PrevComment []string } // Location return the location of node in source code @@ -107,13 +108,25 @@ type directives struct { seen map[string]struct{} // Whether the escape directive has been seen } -// setEscapeToken sets the default token for escaping characters in a Dockerfile. +// setEscapeToken sets the default token for escaping characters and as line- +// continuation token in a Dockerfile. Only ` (backtick) and \ (backslash) are +// allowed as token. 
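The setEscapeToken change that follows compiles a regular expression in which the escape token doubles as the line-continuation marker, unless the character immediately before it is the token itself. A standalone sketch of the same construction for the default backslash token; the input lines are illustrative:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same shape as the lineContinuationRegex built in setEscapeToken, using
	// the default "\" escape token: either a continuation token preceded by a
	// non-token character, or a token alone on the line.
	token := `\`
	re := regexp.MustCompile(`([^\` + token + `])\` + token + `[ \t]*$|^\` + token + `[ \t]*$`)

	for _, line := range []string{
		`RUN apt-get update \`, // continued; "$1" keeps the character before the token
		`\`,                    // a continuation token on its own line
		`ENV FOO=bar`,          // complete line
		// A continuation preceded by an escaped token ("foo \\\") is not
		// matched, which is the limitation the doc comment calls out.
	} {
		if re.MatchString(line) {
			fmt.Printf("%-22q continued, kept %q\n", line, re.ReplaceAllString(line, "$1"))
		} else {
			fmt.Printf("%-22q complete\n", line)
		}
	}
}
```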
func (d *directives) setEscapeToken(s string) error { - if s != "`" && s != "\\" { + if s != "`" && s != `\` { return errors.Errorf("invalid escape token '%s' does not match ` or \\", s) } d.escapeToken = rune(s[0]) - d.lineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) + // The escape token is used both to escape characters in a line and as line + // continuation token. If it's the last non-whitespace token, it is used as + // line-continuation token, *unless* preceded by an escape-token. + // + // The second branch in the regular expression handles line-continuation + // tokens on their own line, which don't have any character preceding them. + // + // Due to Go lacking negative look-ahead matching, this regular expression + // does not currently handle a line-continuation token preceded by an *escaped* + // escape-token ("foo \\\"). + d.lineContinuationRegex = regexp.MustCompile(`([^\` + s + `])\` + s + `[ \t]*$|^\` + s + `[ \t]*$`) return nil } @@ -191,7 +204,7 @@ func init() { // newNodeFromLine splits the line into parts, and dispatches to a function // based on the command and command arguments. A Node is created from the // result of the dispatch. -func newNodeFromLine(line string, d *directives) (*Node, error) { +func newNodeFromLine(line string, d *directives, comments []string) (*Node, error) { cmd, flags, args, err := splitCommand(line) if err != nil { return nil, err @@ -208,11 +221,12 @@ func newNodeFromLine(line string, d *directives) (*Node, error) { } return &Node{ - Value: cmd, - Original: line, - Flags: flags, - Next: next, - Attributes: attrs, + Value: cmd, + Original: line, + Flags: flags, + Next: next, + Attributes: attrs, + PrevComment: comments, }, nil } @@ -239,6 +253,7 @@ func Parse(rwc io.Reader) (*Result, error) { root := &Node{StartLine: -1} scanner := bufio.NewScanner(rwc) warnings := []string{} + var comments []string var err error for scanner.Scan() { @@ -247,6 +262,14 @@ func Parse(rwc io.Reader) (*Result, error) { // First line, strip the byte-order-marker if present bytesRead = bytes.TrimPrefix(bytesRead, utf8bom) } + if isComment(bytesRead) { + comment := strings.TrimSpace(string(bytesRead[1:])) + if comment == "" { + comments = nil + } else { + comments = append(comments, comment) + } + } bytesRead, err = processLine(d, bytesRead, true) if err != nil { return nil, withLocation(err, currentLine, 0) @@ -285,10 +308,11 @@ func Parse(rwc io.Reader) (*Result, error) { warnings = append(warnings, "[WARNING]: Empty continuation line found in:\n "+line) } - child, err := newNodeFromLine(line, d) + child, err := newNodeFromLine(line, d, comments) if err != nil { return nil, withLocation(err, startLine, currentLine) } + comments = nil root.AddChild(child, startLine, currentLine) } @@ -327,7 +351,7 @@ var utf8bom = []byte{0xEF, 0xBB, 0xBF} func trimContinuationCharacter(line string, d *directives) (string, bool) { if d.lineContinuationRegex.MatchString(line) { - line = d.lineContinuationRegex.ReplaceAllString(line, "") + line = d.lineContinuationRegex.ReplaceAllString(line, "$1") return line, false } return line, true diff --git a/vendor/github.com/moby/buildkit/frontend/frontend.go b/vendor/github.com/moby/buildkit/frontend/frontend.go index e55b6af7d1591be1885ec6a030f4ac0a5ce63080..fea8e14942e15ec3ffafd7a6379f4e7224b98e80 100644 --- a/vendor/github.com/moby/buildkit/frontend/frontend.go +++ b/vendor/github.com/moby/buildkit/frontend/frontend.go @@ -7,12 +7,13 @@ import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/executor" gw 
"github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" ) type Frontend interface { - Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string, inputs map[string]*pb.Definition, sid string) (*Result, error) + Solve(ctx context.Context, llb FrontendLLBBridge, opt map[string]string, inputs map[string]*pb.Definition, sid string, sm *session.Manager) (*Result, error) } type FrontendLLBBridge interface { diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go index bc7f96b5fefd04f5cde7b1e29e17c9df80b64b24..9f0b7f019811526c6b0da534acddad7b16c77824 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go @@ -2,6 +2,7 @@ package client import ( "context" + "io" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/solver/pb" @@ -16,6 +17,63 @@ type Client interface { ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) BuildOpts() BuildOpts Inputs(ctx context.Context) (map[string]llb.State, error) + NewContainer(ctx context.Context, req NewContainerRequest) (Container, error) +} + +// NewContainerRequest encapsulates the requirements for a client to define a +// new container, without defining the initial process. +type NewContainerRequest struct { + Mounts []Mount + NetMode pb.NetMode + Platform *pb.Platform + Constraints *pb.WorkerConstraints +} + +// Mount allows clients to specify a filesystem mount. A Reference to a +// previously solved Result is required. +type Mount struct { + Selector string + Dest string + Ref Reference + Readonly bool + MountType pb.MountType + CacheOpt *pb.CacheOpt + SecretOpt *pb.SecretOpt + SSHOpt *pb.SSHOpt +} + +// Container is used to start new processes inside a container and release the +// container resources when done. +type Container interface { + Start(context.Context, StartRequest) (ContainerProcess, error) + Release(context.Context) error +} + +// StartRequest encapsulates the arguments to define a process within a +// container. +type StartRequest struct { + Args []string + Env []string + User string + Cwd string + Tty bool + Stdin io.ReadCloser + Stdout, Stderr io.WriteCloser + SecurityMode pb.SecurityMode +} + +// WinSize is same as executor.WinSize, copied here to prevent circular package +// dependencies. +type WinSize struct { + Rows uint32 + Cols uint32 +} + +// ContainerProcess represents a process within a container. 
+type ContainerProcess interface { + Wait() error + Resize(ctx context.Context, size WinSize) error + // TODO Signal(ctx context.Context, sig os.Signal) } type Reference interface { diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/container.go b/vendor/github.com/moby/buildkit/frontend/gateway/container.go new file mode 100644 index 0000000000000000000000000000000000000000..0aad15efaee9dc42590e58de606fefa973cb81a0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/container.go @@ -0,0 +1,373 @@ +package gateway + +import ( + "context" + "fmt" + "runtime" + "sort" + "strings" + "sync" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/mounts" + opspb "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/stack" + utilsystem "github.com/moby/buildkit/util/system" + "github.com/moby/buildkit/worker" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +type NewContainerRequest struct { + ContainerID string + NetMode opspb.NetMode + Mounts []Mount + Platform *opspb.Platform + Constraints *opspb.WorkerConstraints +} + +// Mount used for the gateway.Container is nearly identical to the client.Mount +// except is has a RefProxy instead of Ref to allow for a common abstraction +// between gateway clients. +type Mount struct { + Dest string + Selector string + Readonly bool + MountType opspb.MountType + RefProxy solver.ResultProxy + CacheOpt *opspb.CacheOpt + SecretOpt *opspb.SecretOpt + SSHOpt *opspb.SSHOpt +} + +func toProtoMount(m Mount) *opspb.Mount { + return &opspb.Mount{ + Selector: m.Selector, + Dest: m.Dest, + Readonly: m.Readonly, + MountType: m.MountType, + CacheOpt: m.CacheOpt, + SecretOpt: m.SecretOpt, + SSHOpt: m.SSHOpt, + } +} + +func NewContainer(ctx context.Context, e executor.Executor, sm *session.Manager, g session.Group, req NewContainerRequest) (client.Container, error) { + ctx, cancel := context.WithCancel(ctx) + eg, ctx := errgroup.WithContext(ctx) + platform := opspb.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + } + if req.Platform != nil { + platform = *req.Platform + } + ctr := &gatewayContainer{ + id: req.ContainerID, + netMode: req.NetMode, + platform: platform, + executor: e, + errGroup: eg, + ctx: ctx, + cancel: cancel, + } + + makeMutable := func(worker worker.Worker, ref cache.ImmutableRef) (cache.MutableRef, error) { + mRef, err := worker.CacheManager().New(ctx, ref, g) + if err != nil { + return nil, stack.Enable(err) + } + ctr.cleanup = append(ctr.cleanup, func() error { + return stack.Enable(mRef.Release(context.TODO())) + }) + return mRef, nil + } + + var mm *mounts.MountManager + mnts := req.Mounts + + for i, m := range mnts { + if m.Dest == opspb.RootMount && m.RefProxy != nil { + res, err := m.RefProxy.Result(ctx) + if err != nil { + return nil, stack.Enable(err) + } + workerRef, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference for exec %T", res.Sys()) + } + + name := fmt.Sprintf("container %s", req.ContainerID) + mm = mounts.NewMountManager(name, workerRef.Worker.CacheManager(), sm, workerRef.Worker.MetadataStore()) + + ctr.rootFS = mountWithSession(workerRef.ImmutableRef, g) + if !m.Readonly { + ref, err := makeMutable(workerRef.Worker, workerRef.ImmutableRef) + if err != nil { + 
return nil, stack.Enable(err) + } + ctr.rootFS = mountWithSession(ref, g) + } + + // delete root mount from list, handled here + mnts = append(mnts[:i], mnts[i+1:]...) + break + } + } + + if ctr.rootFS.Src == nil { + return nil, errors.Errorf("root mount required") + } + + for _, m := range mnts { + var ref cache.ImmutableRef + var mountable cache.Mountable + if m.RefProxy != nil { + res, err := m.RefProxy.Result(ctx) + if err != nil { + return nil, stack.Enable(err) + } + workerRef, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference for exec %T", res.Sys()) + } + ref = workerRef.ImmutableRef + mountable = ref + + if !m.Readonly { + mountable, err = makeMutable(workerRef.Worker, ref) + if err != nil { + return nil, stack.Enable(err) + } + } + } + switch m.MountType { + case opspb.MountType_BIND: + // nothing to do here + case opspb.MountType_CACHE: + mRef, err := mm.MountableCache(ctx, toProtoMount(m), ref, g) + if err != nil { + return nil, err + } + mountable = mRef + ctr.cleanup = append(ctr.cleanup, func() error { + return stack.Enable(mRef.Release(context.TODO())) + }) + case opspb.MountType_TMPFS: + mountable = mm.MountableTmpFS() + case opspb.MountType_SECRET: + var err error + mountable, err = mm.MountableSecret(ctx, toProtoMount(m), g) + if err != nil { + return nil, err + } + if mountable == nil { + continue + } + case opspb.MountType_SSH: + var err error + mountable, err = mm.MountableSSH(ctx, toProtoMount(m), g) + if err != nil { + return nil, err + } + if mountable == nil { + continue + } + default: + return nil, errors.Errorf("mount type %s not implemented", m.MountType) + } + + // validate that there is a mount + if mountable == nil { + return nil, errors.Errorf("mount %s has no input", m.Dest) + } + + execMount := executor.Mount{ + Src: mountableWithSession(mountable, g), + Selector: m.Selector, + Dest: m.Dest, + Readonly: m.Readonly, + } + + ctr.mounts = append(ctr.mounts, execMount) + } + + // sort mounts so parents are mounted first + sort.Slice(ctr.mounts, func(i, j int) bool { + return ctr.mounts[i].Dest < ctr.mounts[j].Dest + }) + + return ctr, nil +} + +type gatewayContainer struct { + id string + netMode opspb.NetMode + platform opspb.Platform + rootFS executor.Mount + mounts []executor.Mount + executor executor.Executor + started bool + errGroup *errgroup.Group + mu sync.Mutex + cleanup []func() error + ctx context.Context + cancel func() +} + +func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartRequest) (client.ContainerProcess, error) { + resize := make(chan executor.WinSize) + procInfo := executor.ProcessInfo{ + Meta: executor.Meta{ + Args: req.Args, + Env: req.Env, + User: req.User, + Cwd: req.Cwd, + Tty: req.Tty, + NetMode: gwCtr.netMode, + SecurityMode: req.SecurityMode, + }, + Stdin: req.Stdin, + Stdout: req.Stdout, + Stderr: req.Stderr, + Resize: resize, + } + if procInfo.Meta.Cwd == "" { + procInfo.Meta.Cwd = "/" + } + procInfo.Meta.Env = addDefaultEnvvar(procInfo.Meta.Env, "PATH", utilsystem.DefaultPathEnv(gwCtr.platform.OS)) + if req.Tty { + procInfo.Meta.Env = addDefaultEnvvar(procInfo.Meta.Env, "TERM", "xterm") + } + + // mark that we have started on the first call to execProcess for this + // container, so that future calls will call Exec rather than Run + gwCtr.mu.Lock() + started := gwCtr.started + gwCtr.started = true + gwCtr.mu.Unlock() + + eg, ctx := errgroup.WithContext(gwCtr.ctx) + gwProc := &gatewayContainerProcess{ + resize: resize, + errGroup: eg, + groupCtx: ctx, + } + + if 
!started { + startedCh := make(chan struct{}) + gwProc.errGroup.Go(func() error { + logrus.Debugf("Starting new container for %s with args: %q", gwCtr.id, procInfo.Meta.Args) + err := gwCtr.executor.Run(ctx, gwCtr.id, gwCtr.rootFS, gwCtr.mounts, procInfo, startedCh) + return stack.Enable(err) + }) + select { + case <-ctx.Done(): + case <-startedCh: + } + } else { + gwProc.errGroup.Go(func() error { + logrus.Debugf("Execing into container %s with args: %q", gwCtr.id, procInfo.Meta.Args) + err := gwCtr.executor.Exec(ctx, gwCtr.id, procInfo) + return stack.Enable(err) + }) + } + + gwCtr.errGroup.Go(gwProc.errGroup.Wait) + + return gwProc, nil +} + +func (gwCtr *gatewayContainer) Release(ctx context.Context) error { + gwCtr.cancel() + err1 := gwCtr.errGroup.Wait() + + var err2 error + for i := len(gwCtr.cleanup) - 1; i >= 0; i-- { // call in LIFO order + err := gwCtr.cleanup[i]() + if err2 == nil { + err2 = err + } + } + + if err1 != nil { + return stack.Enable(err1) + } + return stack.Enable(err2) +} + +type gatewayContainerProcess struct { + errGroup *errgroup.Group + groupCtx context.Context + resize chan<- executor.WinSize + mu sync.Mutex +} + +func (gwProc *gatewayContainerProcess) Wait() error { + err := stack.Enable(gwProc.errGroup.Wait()) + gwProc.mu.Lock() + defer gwProc.mu.Unlock() + close(gwProc.resize) + return err +} + +func (gwProc *gatewayContainerProcess) Resize(ctx context.Context, size client.WinSize) error { + gwProc.mu.Lock() + defer gwProc.mu.Unlock() + + // is the container done or should we proceed with sending event? + select { + case <-gwProc.groupCtx.Done(): + return nil + case <-ctx.Done(): + return nil + default: + } + + // now we select on contexts again in case p.resize blocks b/c + // container no longer reading from it. In that case when + // the errgroup finishes we want to unblock on the write + // and exit + select { + case <-gwProc.groupCtx.Done(): + case <-ctx.Done(): + case gwProc.resize <- executor.WinSize{Cols: size.Cols, Rows: size.Rows}: + } + return nil +} + +func addDefaultEnvvar(env []string, k, v string) []string { + for _, e := range env { + if strings.HasPrefix(e, k+"=") { + return env + } + } + return append(env, k+"="+v) +} + +func mountWithSession(m cache.Mountable, g session.Group) executor.Mount { + _, readonly := m.(cache.ImmutableRef) + return executor.Mount{ + Src: mountableWithSession(m, g), + Readonly: readonly, + } +} + +func mountableWithSession(m cache.Mountable, g session.Group) executor.Mountable { + return &mountable{m: m, g: g} +} + +type mountable struct { + m cache.Mountable + g session.Group +} + +func (m *mountable) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { + return m.m.Mount(ctx, readonly, m.g) +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go index 241be88e8405cad1d0629294de666ca01c8f3411..368787f327047964304adf4ad6fabd46a1f6122e 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go @@ -4,13 +4,16 @@ import ( "context" "sync" - "github.com/moby/buildkit/cache" cacheutil "github.com/moby/buildkit/cache/util" clienttypes "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/frontend/gateway" "github.com/moby/buildkit/frontend/gateway/client" gwpb "github.com/moby/buildkit/frontend/gateway/pb" + 
"github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" @@ -19,12 +22,13 @@ import ( fstypes "github.com/tonistiigi/fsutil/types" ) -func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, workerInfos []clienttypes.WorkerInfo, sid string) (*bridgeClient, error) { +func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, workerInfos []clienttypes.WorkerInfo, sid string, sm *session.Manager) (*bridgeClient, error) { return &bridgeClient{ opts: opts, inputs: inputs, FrontendLLBBridge: llbBridge, sid: sid, + sm: sm, workerInfos: workerInfos, final: map[*ref]struct{}{}, }, nil @@ -32,14 +36,14 @@ func llbBridgeToGatewayClient(ctx context.Context, llbBridge frontend.FrontendLL type bridgeClient struct { frontend.FrontendLLBBridge - mu sync.Mutex - opts map[string]string - inputs map[string]*opspb.Definition - final map[*ref]struct{} - sid string - exporterAttr map[string][]byte - refs []*ref - workerInfos []clienttypes.WorkerInfo + mu sync.Mutex + opts map[string]string + inputs map[string]*opspb.Definition + final map[*ref]struct{} + sid string + sm *session.Manager + refs []*ref + workerInfos []clienttypes.WorkerInfo } func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*client.Result, error) { @@ -57,7 +61,7 @@ func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*cli cRes := &client.Result{} c.mu.Lock() for k, r := range res.Refs { - rr, err := newRef(r) + rr, err := newRef(r, session.NewGroup(c.sid)) if err != nil { return nil, err } @@ -65,7 +69,7 @@ func (c *bridgeClient) Solve(ctx context.Context, req client.SolveRequest) (*cli cRes.AddRef(k, rr) } if r := res.Ref; r != nil { - rr, err := newRef(r) + rr, err := newRef(r, session.NewGroup(c.sid)) if err != nil { return nil, err } @@ -150,12 +154,48 @@ func (c *bridgeClient) discard(err error) { } } +func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) { + ctrReq := gateway.NewContainerRequest{ + ContainerID: identity.NewID(), + NetMode: req.NetMode, + } + + for _, m := range req.Mounts { + var refProxy solver.ResultProxy + if m.Ref != nil { + var ok bool + refProxy, ok = m.Ref.(*ref) + if !ok { + return nil, errors.Errorf("unexpected Ref type: %T", m.Ref) + } + } + ctrReq.Mounts = append(ctrReq.Mounts, gateway.Mount{ + Dest: m.Dest, + Selector: m.Selector, + Readonly: m.Readonly, + MountType: m.MountType, + RefProxy: refProxy, + CacheOpt: m.CacheOpt, + SecretOpt: m.SecretOpt, + SSHOpt: m.SSHOpt, + }) + } + + group := session.NewGroup(c.sid) + ctr, err := gateway.NewContainer(ctx, c, c.sm, group, ctrReq) + if err != nil { + return nil, err + } + return ctr, nil +} + type ref struct { solver.ResultProxy + session session.Group } -func newRef(r solver.ResultProxy) (*ref, error) { - return &ref{ResultProxy: r}, nil +func newRef(r solver.ResultProxy, s session.Group) (*ref, error) { + return &ref{ResultProxy: r, session: s}, nil } func (r *ref) ToState() (st llb.State, err error) { @@ -167,7 +207,7 @@ func (r *ref) ToState() (st llb.State, err error) { } func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { - ref, err := r.getImmutableRef(ctx) + m, err 
:= r.getMountable(ctx) if err != nil { return nil, err } @@ -180,11 +220,11 @@ func (r *ref) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, err Length: r.Length, } } - return cacheutil.ReadFile(ctx, ref, newReq) + return cacheutil.ReadFile(ctx, m, newReq) } func (r *ref) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) { - ref, err := r.getImmutableRef(ctx) + m, err := r.getMountable(ctx) if err != nil { return nil, err } @@ -192,18 +232,18 @@ func (r *ref) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstype Path: req.Path, IncludePattern: req.IncludePattern, } - return cacheutil.ReadDir(ctx, ref, newReq) + return cacheutil.ReadDir(ctx, m, newReq) } func (r *ref) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) { - ref, err := r.getImmutableRef(ctx) + m, err := r.getMountable(ctx) if err != nil { return nil, err } - return cacheutil.StatFile(ctx, ref, req.Path) + return cacheutil.StatFile(ctx, m, req.Path) } -func (r *ref) getImmutableRef(ctx context.Context) (cache.ImmutableRef, error) { +func (r *ref) getMountable(ctx context.Context) (snapshot.Mountable, error) { rr, err := r.ResultProxy.Result(ctx) if err != nil { return nil, err @@ -212,5 +252,5 @@ func (r *ref) getImmutableRef(ctx context.Context) (cache.ImmutableRef, error) { if !ok { return nil, errors.Errorf("invalid ref: %T", rr.Sys()) } - return ref.ImmutableRef, nil + return ref.ImmutableRef.Mount(ctx, true, r.session) } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go index 571aff8c9da65b70a56aa6d57e2075d3a617795d..9a6d602d1a7c549e4b065da5568d0de4c420c06d 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/frontend.go @@ -5,6 +5,7 @@ import ( "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/frontend/gateway/client" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver/pb" ) @@ -20,8 +21,8 @@ type GatewayForwarder struct { f client.BuildFunc } -func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*pb.Definition, sid string) (retRes *frontend.Result, retErr error) { - c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers.WorkerInfos(), sid) +func (gf *GatewayForwarder) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*pb.Definition, sid string, sm *session.Manager) (retRes *frontend.Result, retErr error) { + c, err := llbBridgeToGatewayClient(ctx, llbBridge, opts, inputs, gf.workers.WorkerInfos(), sid, sm) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go index f2305e0d09aec85a8625bb14183280279af0a0aa..55aae031fd5f44d5158d96e2d93c452398d4e6c6 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go @@ -12,7 +12,9 @@ import ( "sync" "time" + "github.com/containerd/containerd" "github.com/docker/distribution/reference" + "github.com/gogo/googleapis/google/rpc" gogotypes "github.com/gogo/protobuf/types" "github.com/golang/protobuf/ptypes/any" apitypes "github.com/moby/buildkit/api/types" @@ -23,20 +25,27 @@ import ( "github.com/moby/buildkit/executor" 
"github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" + gwclient "github.com/moby/buildkit/frontend/gateway/client" pb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/errdefs" opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/util/grpcerrors" + "github.com/moby/buildkit/util/stack" "github.com/moby/buildkit/util/tracing" "github.com/moby/buildkit/worker" + "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/http2" + "golang.org/x/sync/errgroup" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/health" "google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/status" @@ -67,7 +76,7 @@ func filterPrefix(opts map[string]string, pfx string) map[string]string { return m } -func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, sid string) (*frontend.Result, error) { +func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.FrontendLLBBridge, opts map[string]string, inputs map[string]*opspb.Definition, sid string, sm *session.Manager) (*frontend.Result, error) { source, ok := opts[keySource] if !ok { return nil, errors.Errorf("no source specified for gateway") @@ -75,6 +84,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten _, isDevel := opts[keyDevel] var img specs.Image + var mfstDigest digest.Digest var rootFS cache.MutableRef var readonly bool // TODO: try to switch to read-only by default. 
@@ -105,7 +115,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten return nil, errors.Errorf("invalid ref: %T", res.Sys()) } - rootFS, err = workerRef.Worker.CacheManager().New(ctx, workerRef.ImmutableRef) + rootFS, err = workerRef.Worker.CacheManager().New(ctx, workerRef.ImmutableRef, session.NewGroup(sid)) if err != nil { return nil, err } @@ -126,6 +136,7 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten if err != nil { return nil, err } + mfstDigest = dgst if err := json.Unmarshal(config, &img); err != nil { return nil, err @@ -168,19 +179,13 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten if !ok { return nil, errors.Errorf("invalid ref: %T", r.Sys()) } - rootFS, err = workerRef.Worker.CacheManager().New(ctx, workerRef.ImmutableRef) + rootFS, err = workerRef.Worker.CacheManager().New(ctx, workerRef.ImmutableRef, session.NewGroup(sid)) if err != nil { return nil, err } defer rootFS.Release(context.TODO()) } - lbf, ctx, err := newLLBBridgeForwarder(ctx, llbBridge, gf.workers, inputs, sid) - defer lbf.conn.Close() - if err != nil { - return nil, err - } - args := []string{"/run"} env := []string{} cwd := "/" @@ -207,8 +212,6 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten } env = append(env, "BUILDKIT_WORKERS="+string(dt)) - defer lbf.Discard() - env = append(env, "BUILDKIT_EXPORTEDPRODUCT="+apicaps.ExportedProduct) meta := executor.Meta{ @@ -224,7 +227,27 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten } } - err = llbBridge.Run(ctx, "", rootFS, nil, executor.ProcessInfo{Meta: meta, Stdin: lbf.Stdin, Stdout: lbf.Stdout, Stderr: os.Stderr}, nil) + curCaps := getCaps(img.Config.Labels["moby.buildkit.frontend.caps"]) + addCapsForKnownFrontends(curCaps, mfstDigest) + reqCaps := getCaps(opts["frontend.caps"]) + if len(inputs) > 0 { + reqCaps["moby.buildkit.frontend.inputs"] = struct{}{} + } + + for c := range reqCaps { + if _, ok := curCaps[c]; !ok { + return nil, stack.Enable(grpcerrors.WrapCode(errdefs.NewUnsupportedFrontendCapError(c), codes.Unimplemented)) + } + } + + lbf, ctx, err := serveLLBBridgeForwarder(ctx, llbBridge, gf.workers, inputs, sid, sm) + defer lbf.conn.Close() //nolint + if err != nil { + return nil, err + } + defer lbf.Discard() + + err = llbBridge.Run(ctx, "", mountWithSession(rootFS, session.NewGroup(sid)), nil, executor.ProcessInfo{Meta: meta, Stdin: lbf.Stdin, Stdout: lbf.Stdout, Stderr: os.Stderr}, nil) if err != nil { if errors.Is(err, context.Canceled) && lbf.isErrServerClosed { @@ -302,7 +325,11 @@ func (lbf *llbBridgeForwarder) Result() (*frontend.Result, error) { return lbf.result, nil } -func NewBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string) *llbBridgeForwarder { +func NewBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string, sm *session.Manager) LLBBridgeForwarder { + return newBridgeForwarder(ctx, llbBridge, workers, inputs, sid, sm) +} + +func newBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string, sm *session.Manager) *llbBridgeForwarder { lbf := &llbBridgeForwarder{ callCtx: ctx, llbBridge: llbBridge, @@ -312,13 +339,15 @@ func NewBridgeForwarder(ctx context.Context, llbBridge 
frontend.FrontendLLBBridg workers: workers, inputs: inputs, sid: sid, + sm: sm, + ctrs: map[string]gwclient.Container{}, } return lbf } -func newLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string) (*llbBridgeForwarder, context.Context, error) { +func serveLLBBridgeForwarder(ctx context.Context, llbBridge frontend.FrontendLLBBridge, workers frontend.WorkerInfos, inputs map[string]*opspb.Definition, sid string, sm *session.Manager) (*llbBridgeForwarder, context.Context, error) { ctx, cancel := context.WithCancel(ctx) - lbf := NewBridgeForwarder(ctx, llbBridge, workers, inputs, sid) + lbf := newBridgeForwarder(ctx, llbBridge, workers, inputs, sid, sm) server := grpc.NewServer(grpc.UnaryInterceptor(grpcerrors.UnaryServerInterceptor), grpc.StreamInterceptor(grpcerrors.StreamServerInterceptor)) grpc_health_v1.RegisterHealthServer(server, health.NewServer()) pb.RegisterLLBBridgeServer(server, lbf) @@ -393,6 +422,7 @@ type LLBBridgeForwarder interface { pb.LLBBridgeServer Done() <-chan struct{} Result() (*frontend.Result, error) + Discard() } type llbBridgeForwarder struct { @@ -406,12 +436,14 @@ type llbBridgeForwarder struct { doneCh chan struct{} // closed when result or err become valid through a call to a Return result *frontend.Result err error - exporterAttr map[string][]byte workers frontend.WorkerInfos inputs map[string]*opspb.Definition isErrServerClosed bool sid string + sm *session.Manager *pipe + ctrs map[string]gwclient.Container + ctrsMu sync.Mutex } func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.ResolveImageConfigRequest) (*pb.ResolveImageConfigResponse, error) { @@ -592,7 +624,12 @@ func (lbf *llbBridgeForwarder) ReadFile(ctx context.Context, req *pb.ReadFileReq } } - dt, err := cacheutil.ReadFile(ctx, workerRef.ImmutableRef, newReq) + m, err := workerRef.ImmutableRef.Mount(ctx, true, session.NewGroup(lbf.sid)) + if err != nil { + return nil, err + } + + dt, err := cacheutil.ReadFile(ctx, m, newReq) if err != nil { return nil, err } @@ -624,7 +661,11 @@ func (lbf *llbBridgeForwarder) ReadDir(ctx context.Context, req *pb.ReadDirReque Path: req.DirPath, IncludePattern: req.IncludePattern, } - entries, err := cacheutil.ReadDir(ctx, workerRef.ImmutableRef, newReq) + m, err := workerRef.ImmutableRef.Mount(ctx, true, session.NewGroup(lbf.sid)) + if err != nil { + return nil, err + } + entries, err := cacheutil.ReadDir(ctx, m, newReq) if err != nil { return nil, err } @@ -651,8 +692,11 @@ func (lbf *llbBridgeForwarder) StatFile(ctx context.Context, req *pb.StatFileReq if !ok { return nil, errors.Errorf("invalid ref: %T", r.Sys()) } - - st, err := cacheutil.StatFile(ctx, workerRef.ImmutableRef, req.Path) + m, err := workerRef.ImmutableRef.Mount(ctx, true, session.NewGroup(lbf.sid)) + if err != nil { + return nil, err + } + st, err := cacheutil.StatFile(ctx, m, req.Path) if err != nil { return nil, err } @@ -686,47 +730,46 @@ func (lbf *llbBridgeForwarder) Return(ctx context.Context, in *pb.ReturnRequest) Message: in.Error.Message, Details: convertGogoAny(in.Error.Details), }))) - } else { - r := &frontend.Result{ - Metadata: in.Result.Metadata, - } + } + r := &frontend.Result{ + Metadata: in.Result.Metadata, + } - switch res := in.Result.Result.(type) { - case *pb.Result_RefDeprecated: - ref, err := lbf.convertRef(res.RefDeprecated) + switch res := in.Result.Result.(type) { + case *pb.Result_RefDeprecated: + ref, err := lbf.convertRef(res.RefDeprecated) 
+ if err != nil { + return nil, err + } + r.Ref = ref + case *pb.Result_RefsDeprecated: + m := map[string]solver.ResultProxy{} + for k, id := range res.RefsDeprecated.Refs { + ref, err := lbf.convertRef(id) if err != nil { return nil, err } - r.Ref = ref - case *pb.Result_RefsDeprecated: - m := map[string]solver.ResultProxy{} - for k, id := range res.RefsDeprecated.Refs { - ref, err := lbf.convertRef(id) - if err != nil { - return nil, err - } - m[k] = ref - } - r.Refs = m - case *pb.Result_Ref: - ref, err := lbf.convertRef(res.Ref.Id) + m[k] = ref + } + r.Refs = m + case *pb.Result_Ref: + ref, err := lbf.convertRef(res.Ref.Id) + if err != nil { + return nil, err + } + r.Ref = ref + case *pb.Result_Refs: + m := map[string]solver.ResultProxy{} + for k, ref := range res.Refs.Refs { + ref, err := lbf.convertRef(ref.Id) if err != nil { return nil, err } - r.Ref = ref - case *pb.Result_Refs: - m := map[string]solver.ResultProxy{} - for k, ref := range res.Refs.Refs { - ref, err := lbf.convertRef(ref.Id) - if err != nil { - return nil, err - } - m[k] = ref - } - r.Refs = m + m[k] = ref } - return lbf.setResult(r, nil) + r.Refs = m } + return lbf.setResult(r, nil) } func (lbf *llbBridgeForwarder) Inputs(ctx context.Context, in *pb.InputsRequest) (*pb.InputsResponse, error) { @@ -735,6 +778,417 @@ func (lbf *llbBridgeForwarder) Inputs(ctx context.Context, in *pb.InputsRequest) }, nil } +func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewContainerRequest) (_ *pb.NewContainerResponse, err error) { + logrus.Debugf("|<--- NewContainer %s", in.ContainerID) + ctrReq := NewContainerRequest{ + ContainerID: in.ContainerID, + NetMode: in.Network, + Platform: in.Platform, + Constraints: in.Constraints, + } + + for _, m := range in.Mounts { + var refProxy solver.ResultProxy + if m.ResultID != "" { + refProxy, err = lbf.convertRef(m.ResultID) + if err != nil { + return nil, errors.Wrapf(err, "failed to find ref %s for %q mount", m.ResultID, m.Dest) + } + } + ctrReq.Mounts = append(ctrReq.Mounts, Mount{ + Dest: m.Dest, + Selector: m.Selector, + Readonly: m.Readonly, + MountType: m.MountType, + RefProxy: refProxy, + CacheOpt: m.CacheOpt, + SecretOpt: m.SecretOpt, + SSHOpt: m.SSHOpt, + }) + } + + // Not using `ctx` here because it will get cancelled as soon as NewContainer returns + // and we want the context to live for the duration of the container. 
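The comment above captures a general lifetime rule: a resource created inside an RPC handler must not inherit the handler's context if it has to outlive the call. A minimal illustration with placeholder types rather than buildkit's:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// container is an illustrative long-lived resource, not a buildkit type.
type container struct {
	ctx    context.Context
	cancel context.CancelFunc
}

func (c *container) Release() { c.cancel() }

// newContainer deliberately ignores the caller's context and detaches the
// container's lifetime from it, keeping an explicit cancel for Release.
func newContainer(_ context.Context) *container {
	ctx, cancel := context.WithCancel(context.Background())
	return &container{ctx: ctx, cancel: cancel}
}

func main() {
	rpcCtx, rpcCancel := context.WithCancel(context.Background())
	ctr := newContainer(rpcCtx)
	rpcCancel() // the RPC that created the container has returned

	select {
	case <-ctr.ctx.Done():
		fmt.Println("container torn down with the RPC (what the code avoids)")
	case <-time.After(10 * time.Millisecond):
		fmt.Println("container outlives the RPC; Release ends it explicitly")
	}
	ctr.Release()
}
```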
+ group := session.NewGroup(lbf.sid) + ctr, err := NewContainer(context.Background(), lbf.llbBridge, lbf.sm, group, ctrReq) + if err != nil { + return nil, stack.Enable(err) + } + defer func() { + if err != nil { + ctr.Release(ctx) // ensure release on error + } + }() + + lbf.ctrsMu.Lock() + defer lbf.ctrsMu.Unlock() + // ensure we are not clobbering a dup container id request + if _, ok := lbf.ctrs[in.ContainerID]; ok { + return nil, stack.Enable(status.Errorf(codes.AlreadyExists, "Container %s already exists", in.ContainerID)) + } + lbf.ctrs[in.ContainerID] = ctr + return &pb.NewContainerResponse{}, nil +} + +func (lbf *llbBridgeForwarder) ReleaseContainer(ctx context.Context, in *pb.ReleaseContainerRequest) (*pb.ReleaseContainerResponse, error) { + logrus.Debugf("|<--- ReleaseContainer %s", in.ContainerID) + lbf.ctrsMu.Lock() + ctr, ok := lbf.ctrs[in.ContainerID] + delete(lbf.ctrs, in.ContainerID) + lbf.ctrsMu.Unlock() + if !ok { + return nil, errors.Errorf("container details for %s not found", in.ContainerID) + } + err := ctr.Release(ctx) + return &pb.ReleaseContainerResponse{}, stack.Enable(err) +} + +type processIO struct { + id string + mu sync.Mutex + resize func(context.Context, gwclient.WinSize) error + done chan struct{} + doneOnce sync.Once + // these track the process side of the io pipe for + // read (fd=0) and write (fd=1, fd=2) + processReaders map[uint32]io.ReadCloser + processWriters map[uint32]io.WriteCloser + // these track the server side of the io pipe, so + // when we receive an EOF over grpc, we will close + // this end + serverWriters map[uint32]io.WriteCloser + serverReaders map[uint32]io.ReadCloser +} + +func newProcessIO(id string, openFds []uint32) *processIO { + pio := &processIO{ + id: id, + processReaders: map[uint32]io.ReadCloser{}, + processWriters: map[uint32]io.WriteCloser{}, + serverReaders: map[uint32]io.ReadCloser{}, + serverWriters: map[uint32]io.WriteCloser{}, + done: make(chan struct{}), + } + + for _, fd := range openFds { + // TODO do we know which way to pipe each fd? 
For now assume fd0 is for + // reading, and the rest are for writing + r, w := io.Pipe() + if fd == 0 { + pio.processReaders[fd] = r + pio.serverWriters[fd] = w + } else { + pio.processWriters[fd] = w + pio.serverReaders[fd] = r + } + } + + return pio +} + +func (pio *processIO) Close() (err error) { + pio.mu.Lock() + defer pio.mu.Unlock() + for fd, r := range pio.processReaders { + delete(pio.processReaders, fd) + err1 := r.Close() + if err1 != nil && err == nil { + err = stack.Enable(err1) + } + } + for fd, w := range pio.serverReaders { + delete(pio.serverReaders, fd) + err1 := w.Close() + if err1 != nil && err == nil { + err = stack.Enable(err1) + } + } + pio.Done() + return err +} + +func (pio *processIO) Done() { + stillOpen := len(pio.processReaders) + len(pio.processWriters) + len(pio.serverReaders) + len(pio.serverWriters) + if stillOpen == 0 { + pio.doneOnce.Do(func() { + close(pio.done) + }) + } +} + +func (pio *processIO) Write(f *pb.FdMessage) (err error) { + pio.mu.Lock() + writer := pio.serverWriters[f.Fd] + pio.mu.Unlock() + if writer == nil { + return status.Errorf(codes.OutOfRange, "fd %d unavailable to write", f.Fd) + } + defer func() { + if err != nil || f.EOF { + writer.Close() + pio.mu.Lock() + defer pio.mu.Unlock() + delete(pio.serverWriters, f.Fd) + pio.Done() + } + }() + if len(f.Data) > 0 { + _, err = writer.Write(f.Data) + return stack.Enable(err) + } + return nil +} + +type outputWriter struct { + stream pb.LLBBridge_ExecProcessServer + fd uint32 + processID string +} + +func (w *outputWriter) Write(msg []byte) (int, error) { + logrus.Debugf("|---> File Message %s, fd=%d, %d bytes", w.processID, w.fd, len(msg)) + err := w.stream.Send(&pb.ExecMessage{ + ProcessID: w.processID, + Input: &pb.ExecMessage_File{ + File: &pb.FdMessage{ + Fd: w.fd, + Data: msg, + }, + }, + }) + return len(msg), stack.Enable(err) +} + +func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) error { + eg, ctx := errgroup.WithContext(srv.Context()) + + msgs := make(chan *pb.ExecMessage) + + eg.Go(func() error { + defer close(msgs) + for { + execMsg, err := srv.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + return nil + } + return stack.Enable(err) + } + switch m := execMsg.GetInput().(type) { + case *pb.ExecMessage_Init: + logrus.Debugf("|<--- Init Message %s", execMsg.ProcessID) + case *pb.ExecMessage_File: + if m.File.EOF { + logrus.Debugf("|<--- File Message %s, fd=%d, EOF", execMsg.ProcessID, m.File.Fd) + } else { + logrus.Debugf("|<--- File Message %s, fd=%d, %d bytes", execMsg.ProcessID, m.File.Fd, len(m.File.Data)) + } + case *pb.ExecMessage_Resize: + logrus.Debugf("|<--- Resize Message %s", execMsg.ProcessID) + } + select { + case <-ctx.Done(): + case msgs <- execMsg: + } + } + }) + + eg.Go(func() error { + pios := make(map[string]*processIO) + // close any stray pios on exit to make sure + // all the associated resources get cleaned up + defer func() { + for _, pio := range pios { + pio.Close() + } + }() + + for { + var execMsg *pb.ExecMessage + select { + case <-ctx.Done(): + return nil + case execMsg = <-msgs: + } + if execMsg == nil { + return nil + } + + pid := execMsg.ProcessID + if pid == "" { + return stack.Enable(status.Errorf(codes.InvalidArgument, "ProcessID required")) + } + + pio, pioFound := pios[pid] + + if data := execMsg.GetFile(); data != nil { + if !pioFound { + return stack.Enable(status.Errorf(codes.NotFound, "IO for process %q not found", pid)) + } + err := pio.Write(data) + if err != nil { + return stack.Enable(err) + } + } 
else if resize := execMsg.GetResize(); resize != nil { + if !pioFound { + return stack.Enable(status.Errorf(codes.NotFound, "IO for process %q not found", pid)) + } + pio.resize(ctx, gwclient.WinSize{ + Cols: resize.Cols, + Rows: resize.Rows, + }) + } else if init := execMsg.GetInit(); init != nil { + if pioFound { + return stack.Enable(status.Errorf(codes.AlreadyExists, "Process %s already exists", pid)) + } + id := init.ContainerID + lbf.ctrsMu.Lock() + ctr, ok := lbf.ctrs[id] + lbf.ctrsMu.Unlock() + if !ok { + return stack.Enable(status.Errorf(codes.NotFound, "container %q previously released or not created", id)) + } + + initCtx, initCancel := context.WithCancel(context.Background()) + defer initCancel() + + pio := newProcessIO(pid, init.Fds) + pios[pid] = pio + + proc, err := ctr.Start(initCtx, gwclient.StartRequest{ + Args: init.Meta.Args, + Env: init.Meta.Env, + User: init.Meta.User, + Cwd: init.Meta.Cwd, + Tty: init.Tty, + Stdin: pio.processReaders[0], + Stdout: pio.processWriters[1], + Stderr: pio.processWriters[2], + }) + if err != nil { + return stack.Enable(err) + } + pio.resize = proc.Resize + + eg.Go(func() error { + <-pio.done + logrus.Debugf("|---> Done Message %s", pid) + err := srv.Send(&pb.ExecMessage{ + ProcessID: pid, + Input: &pb.ExecMessage_Done{ + Done: &pb.DoneMessage{}, + }, + }) + return stack.Enable(err) + }) + + eg.Go(func() error { + defer func() { + pio.Close() + }() + err := proc.Wait() + + var statusCode uint32 + var exitError *errdefs.ExitError + var statusError *rpc.Status + if err != nil { + statusCode = containerd.UnknownExitStatus + st, _ := status.FromError(grpcerrors.ToGRPC(err)) + stp := st.Proto() + statusError = &rpc.Status{ + Code: stp.Code, + Message: stp.Message, + Details: convertToGogoAny(stp.Details), + } + } + if errors.As(err, &exitError) { + statusCode = exitError.ExitCode + } + logrus.Debugf("|---> Exit Message %s, code=%d, error=%s", pid, statusCode, err) + sendErr := srv.Send(&pb.ExecMessage{ + ProcessID: pid, + Input: &pb.ExecMessage_Exit{ + Exit: &pb.ExitMessage{ + Code: statusCode, + Error: statusError, + }, + }, + }) + + if sendErr != nil && err != nil { + return errors.Wrap(sendErr, err.Error()) + } else if sendErr != nil { + return stack.Enable(sendErr) + } + + if err != nil && statusCode != 0 { + // this was a container exit error which is "normal" so + // don't return this error from the errgroup + return nil + } + return stack.Enable(err) + }) + + logrus.Debugf("|---> Started Message %s", pid) + err = srv.Send(&pb.ExecMessage{ + ProcessID: pid, + Input: &pb.ExecMessage_Started{ + Started: &pb.StartedMessage{}, + }, + }) + if err != nil { + return stack.Enable(err) + } + + // start sending Fd output back to client, this is done after + // StartedMessage so that Fd output will not potentially arrive + // to the client before "Started" as the container starts up. + for fd, file := range pio.serverReaders { + fd, file := fd, file + eg.Go(func() error { + defer func() { + file.Close() + pio.mu.Lock() + defer pio.mu.Unlock() + w := pio.processWriters[fd] + if w != nil { + w.Close() + } + delete(pio.processWriters, fd) + pio.Done() + }() + dest := &outputWriter{ + stream: srv, + fd: uint32(fd), + processID: pid, + } + _, err := io.Copy(dest, file) + // ignore ErrClosedPipe, it is EOF for our usage. 
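The comment above relies on a property of io.Pipe: once the read half is closed, further reads fail with io.ErrClosedPipe rather than io.EOF, so the copy loop treats that error as a normal end of stream. A small demonstration:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	r, w := io.Pipe()

	go func() {
		w.Write([]byte("partial output")) // delivered before the pipe is torn down
		r.Close()                         // e.g. processIO.Close releasing the fd
	}()

	buf := make([]byte, 32)
	for {
		n, err := r.Read(buf)
		if n > 0 {
			fmt.Printf("read %q\n", buf[:n])
		}
		if err != nil {
			// After the reader is closed, Read reports io.ErrClosedPipe rather
			// than io.EOF, so the stream copier treats it as end-of-stream.
			fmt.Println("stopped:", err, "treat as EOF:", errors.Is(err, io.ErrClosedPipe))
			return
		}
	}
}
```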
+ if err != nil && !errors.Is(err, io.ErrClosedPipe) { + return stack.Enable(err) + } + // no error so must be EOF + logrus.Debugf("|---> File Message %s, fd=%d, EOF", pid, fd) + err = srv.Send(&pb.ExecMessage{ + ProcessID: pid, + Input: &pb.ExecMessage_File{ + File: &pb.FdMessage{ + Fd: uint32(fd), + EOF: true, + }, + }, + }) + return stack.Enable(err) + }) + } + } + } + }) + + err := eg.Wait() + return stack.Enable(err) +} + func (lbf *llbBridgeForwarder) convertRef(id string) (solver.ResultProxy, error) { if id == "" { return nil, nil @@ -773,3 +1227,39 @@ func convertGogoAny(in []*gogotypes.Any) []*any.Any { } return out } + +func convertToGogoAny(in []*any.Any) []*gogotypes.Any { + out := make([]*gogotypes.Any, len(in)) + for i := range in { + out[i] = &gogotypes.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value} + } + return out +} + +func getCaps(label string) map[string]struct{} { + if label == "" { + return make(map[string]struct{}) + } + caps := strings.Split(label, ",") + out := make(map[string]struct{}, len(caps)) + for _, c := range caps { + name := strings.SplitN(c, "+", 2) + if name[0] != "" { + out[name[0]] = struct{}{} + } + } + return out +} + +func addCapsForKnownFrontends(caps map[string]struct{}, dgst digest.Digest) { + // these frontends were built without caps detection but do support inputs + defaults := map[digest.Digest]struct{}{ + "sha256:9ac1c43a60e31dca741a6fe8314130a9cd4c4db0311fbbc636ff992ef60ae76d": {}, // docker/dockerfile:1.1.6 + "sha256:080bd74d8778f83e7b670de193362d8c593c8b14f5c8fb919d28ee8feda0d069": {}, // docker/dockerfile:1.1.7 + "sha256:60543a9d92b92af5088fb2938fb09b2072684af8384399e153e137fe081f8ab4": {}, // docker/dockerfile:1.1.6-experimental + "sha256:de85b2f3a3e8a2f7fe48e8e84a65f6fdd5cd5183afa6412fff9caa6871649c44": {}, // docker/dockerfile:1.1.7-experimental + } + if _, ok := defaults[dgst]; ok { + caps["moby.buildkit.frontend.inputs"] = struct{}{} + } +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go index 5fe33a1963d20eba760b44307a58fa0aacdee777..13f6d5faf9eba7a134adeab3b99dea11a321c0fd 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go @@ -3,38 +3,48 @@ package grpcclient import ( "context" "encoding/json" + "fmt" "io" "net" "os" "strings" + "sync" "time" + "github.com/containerd/containerd" "github.com/gogo/googleapis/google/rpc" gogotypes "github.com/gogo/protobuf/types" "github.com/golang/protobuf/ptypes/any" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend/gateway/client" pb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/solver/errdefs" opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/util/grpcerrors" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" fstypes "github.com/tonistiigi/fsutil/types" + "golang.org/x/sync/errgroup" + spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) const frontendPrefix = "BUILDKIT_FRONTEND_OPT_" type GrpcClient interface { + client.Client Run(context.Context, client.BuildFunc) error } func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) 
(GrpcClient, error) { - ctx, cancel := context.WithTimeout(ctx, 15*time.Second) - defer cancel() - resp, err := c.Ping(ctx, &pb.PingRequest{}) + pingCtx, pingCancel := context.WithTimeout(ctx, 15*time.Second) + defer pingCancel() + resp, err := c.Ping(pingCtx, &pb.PingRequest{}) if err != nil { return nil, err } @@ -56,6 +66,7 @@ func New(ctx context.Context, opts map[string]string, session, product string, c caps: pb.Caps.CapSet(resp.FrontendAPICaps), llbCaps: opspb.Caps.CapSet(resp.LLBCaps), requests: map[string]*pb.SolveRequest{}, + execMsgs: newMessageForwarder(ctx, c), }, nil } @@ -167,10 +178,21 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro }() } + defer func() { + err = c.execMsgs.Release() + if err != nil && retError != nil { + retError = err + } + }() + if res, err = f(ctx, c); err != nil { return err } + if res == nil { + return nil + } + if err := c.caps.Supports(pb.CapReturnMap); len(res.Refs) > 1 && err != nil { return err } @@ -253,6 +275,7 @@ type grpcClient struct { caps apicaps.CapSet llbCaps apicaps.CapSet requests map[string]*pb.SolveRequest + execMsgs *messageForwarder } func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, error) { @@ -423,14 +446,460 @@ func (c *grpcClient) Inputs(ctx context.Context) (map[string]llb.State, error) { inputs[key] = llb.NewState(op) } return inputs, nil +} + +// procMessageForwarder is created per container process to act as the +// communication channel between the process and the ExecProcess message +// stream. +type procMessageForwarder struct { + done chan struct{} + closeOnce sync.Once + msgs chan *pb.ExecMessage +} + +func newProcMessageForwarder() *procMessageForwarder { + return &procMessageForwarder{ + done: make(chan struct{}), + msgs: make(chan *pb.ExecMessage), + } +} + +func (b *procMessageForwarder) Send(ctx context.Context, m *pb.ExecMessage) { + select { + case <-ctx.Done(): + case <-b.done: + b.closeOnce.Do(func() { + close(b.msgs) + }) + case b.msgs <- m: + } +} + +func (b *procMessageForwarder) Recv(ctx context.Context) (m *pb.ExecMessage, ok bool) { + select { + case <-ctx.Done(): + return nil, true + case <-b.done: + return nil, false + case m = <-b.msgs: + return m, true + } +} + +func (b *procMessageForwarder) Close() { + close(b.done) + b.Recv(context.Background()) // flush any messages in queue + b.Send(context.Background(), nil) // ensure channel is closed +} +// messageForwarder manages a single grpc stream for ExecProcess to facilitate +// a pub/sub message channel for each new process started from the client +// connection. 
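The messageForwarder described above multiplexes every process's messages over a single ExecProcess stream: each process registers a private channel keyed by ProcessID, the one receive loop dispatches to it, and messages for unknown processes are logged and dropped. A stripped-down sketch of that pattern with placeholder message and registry types:

```go
package main

import (
	"fmt"
	"sync"
)

// message stands in for pb.ExecMessage.
type message struct {
	processID string
	payload   string
}

// demux plays the role of messageForwarder's pid registry.
type demux struct {
	mu   sync.Mutex
	pids map[string]chan message
}

func (d *demux) register(pid string) chan message {
	d.mu.Lock()
	defer d.mu.Unlock()
	ch := make(chan message, 1)
	d.pids[pid] = ch
	return ch
}

func (d *demux) deregister(pid string) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if ch, ok := d.pids[pid]; ok {
		delete(d.pids, pid)
		close(ch)
	}
}

// dispatch plays the role of the single stream.Recv loop.
func (d *demux) dispatch(m message) {
	d.mu.Lock()
	ch, ok := d.pids[m.processID]
	d.mu.Unlock()
	if !ok {
		fmt.Println("message for unregistered process:", m.processID)
		return
	}
	ch <- m
}

func main() {
	d := &demux{pids: map[string]chan message{}}
	ch := d.register("proc-1")
	d.dispatch(message{processID: "proc-1", payload: "started"})
	fmt.Println((<-ch).payload)
	d.deregister("proc-1")
	d.dispatch(message{processID: "proc-2", payload: "exit"}) // dropped, like the real forwarder
}
```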
+type messageForwarder struct { + client pb.LLBBridgeClient + ctx context.Context + cancel func() + eg *errgroup.Group + mu sync.Mutex + pids map[string]*procMessageForwarder + stream pb.LLBBridge_ExecProcessClient + // startOnce used to only start the exec message forwarder once, + // so we only have one exec stream per client + startOnce sync.Once + // startErr tracks the error when initializing the stream, it will + // be returned on subsequent calls to Start + startErr error +} + +func newMessageForwarder(ctx context.Context, client pb.LLBBridgeClient) *messageForwarder { + ctx, cancel := context.WithCancel(ctx) + eg, ctx := errgroup.WithContext(ctx) + return &messageForwarder{ + client: client, + pids: map[string]*procMessageForwarder{}, + ctx: ctx, + cancel: cancel, + eg: eg, + } +} + +func (m *messageForwarder) Start() (err error) { + defer func() { + if err != nil { + m.startErr = err + } + }() + + if m.startErr != nil { + return m.startErr + } + + m.startOnce.Do(func() { + m.stream, err = m.client.ExecProcess(m.ctx) + if err != nil { + return + } + m.eg.Go(func() error { + for { + msg, err := m.stream.Recv() + if errors.Is(err, io.EOF) || grpcerrors.Code(err) == codes.Canceled { + return nil + } + logrus.Debugf("|<--- %s", debugMessage(msg)) + + if err != nil { + return err + } + + m.mu.Lock() + msgs, ok := m.pids[msg.ProcessID] + m.mu.Unlock() + + if !ok { + logrus.Debugf("Received exec message for unregistered process: %s", msg.String()) + continue + } + msgs.Send(m.ctx, msg) + } + }) + }) + return err +} + +func debugMessage(msg *pb.ExecMessage) string { + switch m := msg.GetInput().(type) { + case *pb.ExecMessage_Init: + return fmt.Sprintf("Init Message %s", msg.ProcessID) + case *pb.ExecMessage_File: + if m.File.EOF { + return fmt.Sprintf("File Message %s, fd=%d, EOF", msg.ProcessID, m.File.Fd) + } + return fmt.Sprintf("File Message %s, fd=%d, %d bytes", msg.ProcessID, m.File.Fd, len(m.File.Data)) + case *pb.ExecMessage_Resize: + return fmt.Sprintf("Resize Message %s", msg.ProcessID) + case *pb.ExecMessage_Started: + return fmt.Sprintf("Started Message %s", msg.ProcessID) + case *pb.ExecMessage_Exit: + return fmt.Sprintf("Exit Message %s, code=%d, err=%s", msg.ProcessID, m.Exit.Code, m.Exit.Error) + case *pb.ExecMessage_Done: + return fmt.Sprintf("Done Message %s", msg.ProcessID) + } + return fmt.Sprintf("Unknown Message %s", msg.String()) +} + +func (m *messageForwarder) Send(msg *pb.ExecMessage) error { + m.mu.Lock() + _, ok := m.pids[msg.ProcessID] + defer m.mu.Unlock() + if !ok { + return errors.Errorf("process %s has ended, not sending message %#v", msg.ProcessID, msg.Input) + } + logrus.Debugf("|---> %s", debugMessage(msg)) + return m.stream.Send(msg) +} + +func (m *messageForwarder) Release() error { + m.cancel() + return m.eg.Wait() +} + +func (m *messageForwarder) Register(pid string) *procMessageForwarder { + m.mu.Lock() + defer m.mu.Unlock() + sender := newProcMessageForwarder() + m.pids[pid] = sender + return sender +} + +func (m *messageForwarder) Deregister(pid string) { + m.mu.Lock() + defer m.mu.Unlock() + sender, ok := m.pids[pid] + if !ok { + return + } + delete(m.pids, pid) + sender.Close() +} + +type msgWriter struct { + mux *messageForwarder + fd uint32 + processID string +} + +func (w *msgWriter) Write(msg []byte) (int, error) { + err := w.mux.Send(&pb.ExecMessage{ + ProcessID: w.processID, + Input: &pb.ExecMessage_File{ + File: &pb.FdMessage{ + Fd: w.fd, + Data: msg, + }, + }, + }) + if err != nil { + return 0, err + } + return len(msg), nil +} + +func 
(c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) { + err := c.caps.Supports(pb.CapGatewayExec) + if err != nil { + return nil, err + } + id := identity.NewID() + var mounts []*opspb.Mount + for _, m := range req.Mounts { + var resultID string + if m.Ref != nil { + ref, ok := m.Ref.(*reference) + if !ok { + return nil, errors.Errorf("unexpected type for reference, got %T", m.Ref) + } + resultID = ref.id + } + mounts = append(mounts, &opspb.Mount{ + Dest: m.Dest, + Selector: m.Selector, + Readonly: m.Readonly, + MountType: m.MountType, + ResultID: resultID, + CacheOpt: m.CacheOpt, + SecretOpt: m.SecretOpt, + SSHOpt: m.SSHOpt, + }) + } + + logrus.Debugf("|---> NewContainer %s", id) + _, err = c.client.NewContainer(ctx, &pb.NewContainerRequest{ + ContainerID: id, + Mounts: mounts, + Platform: req.Platform, + Constraints: req.Constraints, + }) + if err != nil { + return nil, err + } + + // ensure message forwarder is started, only sets up stream first time called + err = c.execMsgs.Start() + if err != nil { + return nil, err + } + + return &container{ + client: c.client, + id: id, + execMsgs: c.execMsgs, + }, nil +} + +type container struct { + client pb.LLBBridgeClient + id string + execMsgs *messageForwarder +} + +func (ctr *container) Start(ctx context.Context, req client.StartRequest) (client.ContainerProcess, error) { + pid := fmt.Sprintf("%s:%s", ctr.id, identity.NewID()) + msgs := ctr.execMsgs.Register(pid) + + init := &pb.InitMessage{ + ContainerID: ctr.id, + Meta: &opspb.Meta{ + Args: req.Args, + Env: req.Env, + Cwd: req.Cwd, + User: req.User, + }, + Tty: req.Tty, + Security: req.SecurityMode, + } + if req.Stdin != nil { + init.Fds = append(init.Fds, 0) + } + if req.Stdout != nil { + init.Fds = append(init.Fds, 1) + } + if req.Stderr != nil { + init.Fds = append(init.Fds, 2) + } + + err := ctr.execMsgs.Send(&pb.ExecMessage{ + ProcessID: pid, + Input: &pb.ExecMessage_Init{ + Init: init, + }, + }) + if err != nil { + return nil, err + } + + msg, _ := msgs.Recv(ctx) + if msg == nil { + return nil, errors.Errorf("failed to receive started message") + } + started := msg.GetStarted() + if started == nil { + return nil, errors.Errorf("expecting started message, got %T", msg.GetInput()) + } + + eg, ctx := errgroup.WithContext(ctx) + done := make(chan struct{}) + + ctrProc := &containerProcess{ + execMsgs: ctr.execMsgs, + id: pid, + eg: eg, + } + + var stdinReader *io.PipeReader + ctrProc.eg.Go(func() error { + <-done + if stdinReader != nil { + return stdinReader.Close() + } + return nil + }) + + if req.Stdin != nil { + var stdinWriter io.WriteCloser + stdinReader, stdinWriter = io.Pipe() + // This go routine is intentionally not part of the errgroup because + // if os.Stdin is used for req.Stdin then this will block until + // the user closes the input, which will likely be after we are done + // with the container, so we can't Wait on it. + go func() { + io.Copy(stdinWriter, req.Stdin) + stdinWriter.Close() + }() + + ctrProc.eg.Go(func() error { + m := &msgWriter{ + mux: ctr.execMsgs, + processID: pid, + fd: 0, + } + _, err := io.Copy(m, stdinReader) + // ignore ErrClosedPipe, it is EOF for our usage. 
+ if err != nil && !errors.Is(err, io.ErrClosedPipe) { + return err + } + // not an error so must be eof + return ctr.execMsgs.Send(&pb.ExecMessage{ + ProcessID: pid, + Input: &pb.ExecMessage_File{ + File: &pb.FdMessage{ + Fd: 0, + EOF: true, + }, + }, + }) + }) + } + + ctrProc.eg.Go(func() error { + var closeDoneOnce sync.Once + var exitError error + for { + msg, ok := msgs.Recv(ctx) + if !ok { + // no more messages, return + return exitError + } + + if msg == nil { + // empty message from ctx cancel, so just start shutting down + // input, but continue processing more exit/done messages + closeDoneOnce.Do(func() { + close(done) + }) + continue + } + + if file := msg.GetFile(); file != nil { + var out io.WriteCloser + switch file.Fd { + case 1: + out = req.Stdout + case 2: + out = req.Stderr + } + if out == nil { + // if things are plumbed correctly this should never happen + return errors.Errorf("missing writer for output fd %d", file.Fd) + } + if len(file.Data) > 0 { + _, err := out.Write(file.Data) + if err != nil { + return err + } + } + } else if exit := msg.GetExit(); exit != nil { + // capture exit message to exitError so we can return it after + // the server sends the Done message + closeDoneOnce.Do(func() { + close(done) + }) + if exit.Code == 0 { + continue + } + exitError = grpcerrors.FromGRPC(status.ErrorProto(&spb.Status{ + Code: exit.Error.Code, + Message: exit.Error.Message, + Details: convertGogoAny(exit.Error.Details), + })) + if exit.Code != containerd.UnknownExitStatus { + exitError = &errdefs.ExitError{ExitCode: exit.Code, Err: exitError} + } + } else if serverDone := msg.GetDone(); serverDone != nil { + return exitError + } else { + return errors.Errorf("unexpected Exec Message for pid %s: %T", pid, msg.GetInput()) + } + } + }) + + return ctrProc, nil +} + +func (ctr *container) Release(ctx context.Context) error { + logrus.Debugf("|---> ReleaseContainer %s", ctr.id) + _, err := ctr.client.ReleaseContainer(ctx, &pb.ReleaseContainerRequest{ + ContainerID: ctr.id, + }) + return err +} + +type containerProcess struct { + execMsgs *messageForwarder + id string + eg *errgroup.Group +} + +func (ctrProc *containerProcess) Wait() error { + defer ctrProc.execMsgs.Deregister(ctrProc.id) + return ctrProc.eg.Wait() +} + +func (ctrProc *containerProcess) Resize(_ context.Context, size client.WinSize) error { + return ctrProc.execMsgs.Send(&pb.ExecMessage{ + ProcessID: ctrProc.id, + Input: &pb.ExecMessage_Resize{ + Resize: &pb.ResizeMessage{ + Cols: size.Cols, + Rows: size.Rows, + }, + }, + }) } type reference struct { - c *grpcClient - id string - def *opspb.Definition - output llb.Output + c *grpcClient + id string + def *opspb.Definition } func newReference(c *grpcClient, ref *pb.Ref) (*reference, error) { @@ -502,11 +971,11 @@ func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fsty } func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, error) { - dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) { + dialOpt := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { return stdioConn(), nil }) - cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor)) + cc, err := grpc.DialContext(ctx, "localhost", dialOpt, grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), 
grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor)) if err != nil { return nil, nil, errors.Wrap(err, "failed to create grpc client") } @@ -593,6 +1062,14 @@ func product() string { return os.Getenv("BUILDKIT_EXPORTEDPRODUCT") } +func convertGogoAny(in []*gogotypes.Any) []*any.Any { + out := make([]*any.Any, len(in)) + for i := range in { + out[i] = &any.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value} + } + return out +} + func convertToGogoAny(in []*any.Any) []*gogotypes.Any { out := make([]*gogotypes.Any, len(in)) for i := range in { diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go index afb51cf24327f48eba3cbf283e516e2bb49a0ba6..2c8760f6d102aa1f5b9f2ed68ab52a9c26d80051 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go @@ -1,4 +1,4 @@ -package moby_buildkit_v1_frontend +package moby_buildkit_v1_frontend //nolint:golint import "github.com/moby/buildkit/util/apicaps" @@ -35,6 +35,13 @@ const ( // CapGatewaySolveMetadata can be used to check if solve calls from gateway reliably return metadata CapGatewaySolveMetadata apicaps.CapID = "gateway.solve.metadata" + + // CapGatewayExec is the capability to create and interact with new + // containers directly through the gateway + CapGatewayExec apicaps.CapID = "gateway.exec" + + // CapFrontendCaps can be used to check that frontends define support for certain capabilities + CapFrontendCaps apicaps.CapID = "frontend.caps" ) func init() { @@ -136,4 +143,18 @@ func init() { Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapGatewayExec, + Name: "gateway exec", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapFrontendCaps, + Name: "frontend capabilities", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) } diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go index a155367e1ad7352630122152e7e53eb66f9686c1..69936cd19761dff64118f0abbd91c5dcf04a8e36 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go @@ -1322,1168 +1322,1343 @@ func (m *PongResponse) GetWorkers() []*types1.WorkerRecord { return nil } -func init() { - proto.RegisterType((*Result)(nil), "moby.buildkit.v1.frontend.Result") - proto.RegisterMapType((map[string][]byte)(nil), "moby.buildkit.v1.frontend.Result.MetadataEntry") - proto.RegisterType((*RefMapDeprecated)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated.RefsEntry") - proto.RegisterType((*Ref)(nil), "moby.buildkit.v1.frontend.Ref") - proto.RegisterType((*RefMap)(nil), "moby.buildkit.v1.frontend.RefMap") - proto.RegisterMapType((map[string]*Ref)(nil), "moby.buildkit.v1.frontend.RefMap.RefsEntry") - proto.RegisterType((*ReturnRequest)(nil), "moby.buildkit.v1.frontend.ReturnRequest") - proto.RegisterType((*ReturnResponse)(nil), "moby.buildkit.v1.frontend.ReturnResponse") - proto.RegisterType((*InputsRequest)(nil), "moby.buildkit.v1.frontend.InputsRequest") - proto.RegisterType((*InputsResponse)(nil), "moby.buildkit.v1.frontend.InputsResponse") - proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.frontend.InputsResponse.DefinitionsEntry") - 
proto.RegisterType((*ResolveImageConfigRequest)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigRequest") - proto.RegisterType((*ResolveImageConfigResponse)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigResponse") - proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.frontend.SolveRequest") - proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.frontend.SolveRequest.FrontendInputsEntry") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.SolveRequest.FrontendOptEntry") - proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.frontend.CacheOptionsEntry") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.CacheOptionsEntry.AttrsEntry") - proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.frontend.SolveResponse") - proto.RegisterType((*ReadFileRequest)(nil), "moby.buildkit.v1.frontend.ReadFileRequest") - proto.RegisterType((*FileRange)(nil), "moby.buildkit.v1.frontend.FileRange") - proto.RegisterType((*ReadFileResponse)(nil), "moby.buildkit.v1.frontend.ReadFileResponse") - proto.RegisterType((*ReadDirRequest)(nil), "moby.buildkit.v1.frontend.ReadDirRequest") - proto.RegisterType((*ReadDirResponse)(nil), "moby.buildkit.v1.frontend.ReadDirResponse") - proto.RegisterType((*StatFileRequest)(nil), "moby.buildkit.v1.frontend.StatFileRequest") - proto.RegisterType((*StatFileResponse)(nil), "moby.buildkit.v1.frontend.StatFileResponse") - proto.RegisterType((*PingRequest)(nil), "moby.buildkit.v1.frontend.PingRequest") - proto.RegisterType((*PongResponse)(nil), "moby.buildkit.v1.frontend.PongResponse") +type NewContainerRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` + // For mount input values we can use random identifiers passed with ref + Mounts []*pb.Mount `protobuf:"bytes,2,rep,name=Mounts,proto3" json:"Mounts,omitempty"` + Network pb.NetMode `protobuf:"varint,3,opt,name=Network,proto3,enum=pb.NetMode" json:"Network,omitempty"` + Platform *pb.Platform `protobuf:"bytes,4,opt,name=platform,proto3" json:"platform,omitempty"` + Constraints *pb.WorkerConstraints `protobuf:"bytes,5,opt,name=constraints,proto3" json:"constraints,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NewContainerRequest) Reset() { *m = NewContainerRequest{} } +func (m *NewContainerRequest) String() string { return proto.CompactTextString(m) } +func (*NewContainerRequest) ProtoMessage() {} +func (*NewContainerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{22} +} +func (m *NewContainerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } - -var fileDescriptor_f1a937782ebbded5 = []byte{ - // 1436 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0xcd, 0x6e, 0xdb, 0xc6, - 0x16, 0x36, 0xad, 0x1f, 0x5b, 0x47, 0x92, 0xad, 0x4c, 0x2e, 0x2e, 0x14, 0x2e, 0x1c, 0x5d, 0x22, - 0xf0, 0x55, 0x12, 0x87, 0x6a, 0x9d, 0x04, 0x4e, 0x9d, 0x22, 0x69, 0x14, 0x27, 0xb0, 0x5b, 0x3b, - 0x51, 0x27, 0x2d, 0x02, 0x04, 0x29, 0x50, 0x4a, 0x1c, 0x32, 0x44, 0x24, 0x0e, 0x3b, 0x1c, 0x25, - 0x15, 0xba, 0x69, 0x77, 0xdd, 0x17, 0xe8, 0x03, 0xf4, 0x01, 0x8a, 0x3e, 0x41, 0xd7, 0x59, 0x76, - 0x59, 0x74, 0x11, 0x14, 0x7e, 0x92, 0x62, 0x7e, 0x68, 0x51, 0xb2, 0x4c, 0x59, 0xe8, 0x4a, 0x33, - 0x87, 0xe7, 0x3b, 
0x73, 0xce, 0x37, 0xe7, 0x67, 0x04, 0x55, 0xdf, 0xe1, 0xe4, 0xad, 0x33, 0xb2, - 0x23, 0x46, 0x39, 0x45, 0x97, 0x06, 0xb4, 0x3b, 0xb2, 0xbb, 0xc3, 0xa0, 0xef, 0xbe, 0x0e, 0xb8, - 0xfd, 0xe6, 0x43, 0xdb, 0x63, 0x34, 0xe4, 0x24, 0x74, 0xcd, 0x1b, 0x7e, 0xc0, 0x5f, 0x0d, 0xbb, - 0x76, 0x8f, 0x0e, 0x5a, 0x3e, 0xf5, 0x69, 0x4b, 0x22, 0xba, 0x43, 0x4f, 0xee, 0xe4, 0x46, 0xae, - 0x94, 0x25, 0x73, 0x7b, 0x5a, 0xdd, 0xa7, 0xd4, 0xef, 0x13, 0x27, 0x0a, 0x62, 0xbd, 0x6c, 0xb1, - 0xa8, 0xd7, 0x8a, 0xb9, 0xc3, 0x87, 0xb1, 0xc6, 0x6c, 0xa5, 0x30, 0xc2, 0x91, 0x56, 0xe2, 0x48, - 0x2b, 0xa6, 0xfd, 0x37, 0x84, 0xb5, 0xa2, 0x6e, 0x8b, 0x46, 0x89, 0x76, 0xeb, 0x4c, 0x6d, 0x27, - 0x0a, 0x5a, 0x7c, 0x14, 0x91, 0xb8, 0xf5, 0x96, 0xb2, 0xd7, 0x84, 0x69, 0xc0, 0xcd, 0x33, 0x01, - 0x43, 0x1e, 0xf4, 0x05, 0xaa, 0xe7, 0x44, 0xb1, 0x38, 0x44, 0xfc, 0x6a, 0x50, 0x3a, 0x6c, 0x4e, - 0xc3, 0x20, 0xe6, 0x41, 0xe0, 0x07, 0x2d, 0x2f, 0x96, 0x18, 0x75, 0x8a, 0x08, 0x42, 0xa9, 0x5b, - 0x3f, 0xe6, 0xa0, 0x88, 0x49, 0x3c, 0xec, 0x73, 0xb4, 0x09, 0x55, 0x46, 0xbc, 0x3d, 0x12, 0x31, - 0xd2, 0x73, 0x38, 0x71, 0xeb, 0x46, 0xc3, 0x68, 0x96, 0xf6, 0x97, 0xf0, 0xa4, 0x18, 0x7d, 0x09, - 0x6b, 0x8c, 0x78, 0x71, 0x4a, 0x71, 0xb9, 0x61, 0x34, 0xcb, 0xdb, 0xd7, 0xed, 0x33, 0x2f, 0xc3, - 0xc6, 0xc4, 0x3b, 0x72, 0xa2, 0x31, 0x64, 0x7f, 0x09, 0x4f, 0x19, 0x41, 0xdb, 0x90, 0x63, 0xc4, - 0xab, 0xe7, 0xa4, 0xad, 0x8d, 0x6c, 0x5b, 0xfb, 0x4b, 0x58, 0x28, 0xa3, 0x1d, 0xc8, 0x0b, 0x2b, - 0xf5, 0xbc, 0x04, 0xfd, 0x6f, 0xae, 0x03, 0xfb, 0x4b, 0x58, 0x02, 0xd0, 0x67, 0xb0, 0x3a, 0x20, - 0xdc, 0x71, 0x1d, 0xee, 0xd4, 0xa1, 0x91, 0x6b, 0x96, 0xb7, 0x5b, 0x99, 0x60, 0x41, 0x90, 0x7d, - 0xa4, 0x11, 0x8f, 0x42, 0xce, 0x46, 0xf8, 0xc4, 0x80, 0x79, 0x17, 0xaa, 0x13, 0x9f, 0x50, 0x0d, - 0x72, 0xaf, 0xc9, 0x48, 0xf1, 0x87, 0xc5, 0x12, 0xfd, 0x07, 0x0a, 0x6f, 0x9c, 0xfe, 0x90, 0x48, - 0xaa, 0x2a, 0x58, 0x6d, 0x76, 0x97, 0xef, 0x18, 0xed, 0x55, 0x28, 0x32, 0x69, 0xde, 0xfa, 0xd9, - 0x80, 0xda, 0x34, 0x4f, 0xe8, 0x40, 0x47, 0x68, 0x48, 0x27, 0x6f, 0x2f, 0x40, 0xb1, 0x10, 0xc4, - 0xca, 0x55, 0x69, 0xc2, 0xdc, 0x81, 0xd2, 0x89, 0x68, 0x9e, 0x8b, 0xa5, 0x94, 0x8b, 0xd6, 0x0e, - 0xe4, 0x30, 0xf1, 0xd0, 0x1a, 0x2c, 0x07, 0x3a, 0x29, 0xf0, 0x72, 0xe0, 0xa2, 0x06, 0xe4, 0x5c, - 0xe2, 0xe9, 0xcb, 0x5f, 0xb3, 0xa3, 0xae, 0xbd, 0x47, 0xbc, 0x20, 0x0c, 0x78, 0x40, 0x43, 0x2c, - 0x3e, 0x59, 0xbf, 0x18, 0x22, 0xb9, 0x84, 0x5b, 0xe8, 0xfe, 0x44, 0x1c, 0xf3, 0x53, 0xe5, 0x94, - 0xf7, 0xcf, 0xb3, 0xbd, 0xbf, 0x95, 0xf6, 0x7e, 0x6e, 0xfe, 0xa4, 0xa3, 0xe3, 0x50, 0xc5, 0x84, - 0x0f, 0x59, 0x88, 0xc9, 0x37, 0x43, 0x12, 0x73, 0xf4, 0x51, 0x72, 0x23, 0xd2, 0xfe, 0xbc, 0xb4, - 0x12, 0x8a, 0x58, 0x03, 0x50, 0x13, 0x0a, 0x84, 0x31, 0xca, 0xb4, 0x17, 0xc8, 0x56, 0x9d, 0xc3, - 0x66, 0x51, 0xcf, 0x7e, 0x26, 0x3b, 0x07, 0x56, 0x0a, 0x56, 0x0d, 0xd6, 0x92, 0x53, 0xe3, 0x88, - 0x86, 0x31, 0xb1, 0xd6, 0xa1, 0x7a, 0x10, 0x46, 0x43, 0x1e, 0x6b, 0x3f, 0xac, 0xdf, 0x0d, 0x58, - 0x4b, 0x24, 0x4a, 0x07, 0xbd, 0x84, 0xf2, 0x98, 0xe3, 0x84, 0xcc, 0xdd, 0x0c, 0xff, 0x26, 0xf1, - 0xa9, 0x0b, 0xd2, 0xdc, 0xa6, 0xcd, 0x99, 0x4f, 0xa0, 0x36, 0xad, 0x30, 0x83, 0xe9, 0x2b, 0x93, - 0x4c, 0x4f, 0x5f, 0x7c, 0x8a, 0xd9, 0x9f, 0x0c, 0xb8, 0x84, 0x89, 0x6c, 0x85, 0x07, 0x03, 0xc7, - 0x27, 0x0f, 0x69, 0xe8, 0x05, 0x7e, 0x42, 0x73, 0x4d, 0x66, 0x55, 0x62, 0x59, 0x24, 0x58, 0x13, - 0x56, 0x3b, 0x7d, 0x87, 0x7b, 0x94, 0x0d, 0xb4, 0xf1, 0x8a, 0x30, 0x9e, 0xc8, 0xf0, 0xc9, 0x57, - 0xd4, 0x80, 0xb2, 0x36, 0x7c, 0x44, 0x5d, 0x22, 0x7b, 0x46, 0x09, 0xa7, 0x45, 0xa8, 0x0e, 0x2b, - 0x87, 0xd4, 0x7f, 0xe2, 0x0c, 0x88, 0x6c, 
0x0e, 0x25, 0x9c, 0x6c, 0xad, 0xef, 0x0d, 0x30, 0x67, - 0x79, 0xa5, 0x29, 0xfe, 0x14, 0x8a, 0x7b, 0x81, 0x4f, 0x62, 0x75, 0xfb, 0xa5, 0xf6, 0xf6, 0xbb, - 0xf7, 0x97, 0x97, 0xfe, 0x7a, 0x7f, 0xf9, 0x5a, 0xaa, 0xaf, 0xd2, 0x88, 0x84, 0x3d, 0x1a, 0x72, - 0x27, 0x08, 0x09, 0x13, 0xe3, 0xe1, 0x86, 0x2b, 0x21, 0xb6, 0x42, 0x62, 0x6d, 0x01, 0xfd, 0x17, - 0x8a, 0xca, 0xba, 0x2e, 0x7b, 0xbd, 0xb3, 0xfe, 0x2c, 0x40, 0xe5, 0x99, 0x70, 0x20, 0xe1, 0xc2, - 0x06, 0x18, 0x53, 0xa8, 0xd3, 0x6e, 0x9a, 0xd8, 0x94, 0x06, 0x32, 0x61, 0xf5, 0xb1, 0xbe, 0x62, - 0x5d, 0xae, 0x27, 0x7b, 0xf4, 0x02, 0xca, 0xc9, 0xfa, 0x69, 0xc4, 0xeb, 0x39, 0x99, 0x23, 0x77, - 0x32, 0x72, 0x24, 0xed, 0x89, 0x9d, 0x82, 0xea, 0x0c, 0x49, 0x49, 0xd0, 0xc7, 0x70, 0xe9, 0x60, - 0x10, 0x51, 0xc6, 0x1f, 0x3a, 0xbd, 0x57, 0x04, 0x4f, 0x4e, 0x81, 0x7c, 0x23, 0xd7, 0x2c, 0xe1, - 0xb3, 0x15, 0xd0, 0x16, 0x5c, 0x70, 0xfa, 0x7d, 0xfa, 0x56, 0x17, 0x8d, 0x4c, 0xff, 0x7a, 0xa1, - 0x61, 0x34, 0x57, 0xf1, 0xe9, 0x0f, 0xe8, 0x03, 0xb8, 0x98, 0x12, 0x3e, 0x60, 0xcc, 0x19, 0x89, - 0x7c, 0x29, 0x4a, 0xfd, 0x59, 0x9f, 0x44, 0x07, 0x7b, 0x1c, 0x84, 0x4e, 0xbf, 0x0e, 0x52, 0x47, - 0x6d, 0x90, 0x05, 0x95, 0x47, 0xdf, 0x0a, 0x97, 0x08, 0x7b, 0xc0, 0x39, 0xab, 0x97, 0xe5, 0x55, - 0x4c, 0xc8, 0x50, 0x07, 0x2a, 0xd2, 0x61, 0xe5, 0x7b, 0x5c, 0xaf, 0x48, 0xd2, 0xb6, 0x32, 0x48, - 0x93, 0xea, 0x4f, 0xa3, 0x54, 0x29, 0x4d, 0x58, 0x40, 0x3d, 0x58, 0x4b, 0x88, 0x53, 0x35, 0x58, - 0xaf, 0x4a, 0x9b, 0x77, 0x17, 0xbd, 0x08, 0x85, 0x56, 0x47, 0x4c, 0x99, 0x34, 0xef, 0x41, 0x6d, - 0xfa, 0xbe, 0x16, 0x69, 0xec, 0xe6, 0xe7, 0x70, 0x71, 0xc6, 0x31, 0xff, 0xaa, 0xe6, 0x7f, 0x33, - 0xe0, 0xc2, 0x29, 0x6e, 0x10, 0x82, 0xfc, 0x17, 0xa3, 0x88, 0x68, 0x93, 0x72, 0x8d, 0x8e, 0xa0, - 0x20, 0xb8, 0x8f, 0xeb, 0xcb, 0x92, 0x98, 0x9d, 0x45, 0xc8, 0xb6, 0x25, 0x52, 0x91, 0xa2, 0xac, - 0x98, 0x77, 0x00, 0xc6, 0xc2, 0x85, 0xc6, 0xdb, 0x4b, 0xa8, 0x6a, 0xe6, 0x75, 0x0b, 0xa8, 0xa9, - 0x97, 0x88, 0x06, 0x8b, 0x77, 0xc6, 0x78, 0x24, 0xe4, 0x16, 0x1c, 0x09, 0xd6, 0x77, 0xb0, 0x8e, - 0x89, 0xe3, 0x3e, 0x0e, 0xfa, 0xe4, 0xec, 0xce, 0x27, 0xea, 0x39, 0xe8, 0x93, 0x8e, 0xc3, 0x5f, - 0x9d, 0xd4, 0xb3, 0xde, 0xa3, 0x5d, 0x28, 0x60, 0x27, 0xf4, 0x89, 0x3e, 0xfa, 0x4a, 0xc6, 0xd1, - 0xf2, 0x10, 0xa1, 0x8b, 0x15, 0xc4, 0xba, 0x0b, 0xa5, 0x13, 0x99, 0xe8, 0x46, 0x4f, 0x3d, 0x2f, - 0x26, 0xaa, 0xb3, 0xe5, 0xb0, 0xde, 0x09, 0xf9, 0x21, 0x09, 0x7d, 0x7d, 0x74, 0x0e, 0xeb, 0x9d, - 0xb5, 0x29, 0x9e, 0x23, 0x89, 0xe7, 0x9a, 0x1a, 0x04, 0xf9, 0x3d, 0xf1, 0x66, 0x32, 0x64, 0x11, - 0xc9, 0xb5, 0xe5, 0x8a, 0x51, 0xe6, 0xb8, 0x7b, 0x01, 0x3b, 0x3b, 0xc0, 0x3a, 0xac, 0xec, 0x05, - 0x2c, 0x15, 0x5f, 0xb2, 0x45, 0x9b, 0x62, 0xc8, 0xf5, 0xfa, 0x43, 0x57, 0x44, 0xcb, 0x09, 0x0b, - 0x75, 0x37, 0x9f, 0x92, 0x5a, 0xf7, 0x15, 0x8f, 0xf2, 0x14, 0xed, 0xcc, 0x16, 0xac, 0x90, 0x90, - 0xb3, 0x80, 0x24, 0x93, 0x10, 0xd9, 0xea, 0x99, 0x6b, 0xcb, 0x67, 0xae, 0x9c, 0xb8, 0x38, 0x51, - 0xb1, 0x76, 0x60, 0x5d, 0x08, 0xb2, 0x2f, 0x02, 0x41, 0x3e, 0xe5, 0xa4, 0x5c, 0x5b, 0xbb, 0x50, - 0x1b, 0x03, 0xf5, 0xd1, 0x9b, 0x90, 0x17, 0x8f, 0x68, 0xdd, 0xaa, 0x67, 0x9d, 0x2b, 0xbf, 0x5b, - 0x55, 0x28, 0x77, 0x82, 0x30, 0x99, 0x79, 0xd6, 0xb1, 0x01, 0x95, 0x0e, 0x0d, 0xc7, 0xd3, 0xa6, - 0x03, 0xeb, 0x49, 0x05, 0x3e, 0xe8, 0x1c, 0x3c, 0x74, 0xa2, 0x24, 0x94, 0xc6, 0xe9, 0x6b, 0xd6, - 0xef, 0x7d, 0x5b, 0x29, 0xb6, 0xf3, 0x62, 0x30, 0xe1, 0x69, 0x38, 0xfa, 0x04, 0x56, 0x0e, 0x0f, - 0xdb, 0xd2, 0xd2, 0xf2, 0x42, 0x96, 0x12, 0x18, 0xba, 0x07, 0x2b, 0xcf, 0xe5, 0xdf, 0x90, 0x58, - 0x0f, 0x8f, 0x19, 0x29, 0xa7, 0x02, 0x55, 0x6a, 0x98, 0xf4, 0x28, 
0x73, 0x71, 0x02, 0xda, 0xfe, - 0xb5, 0x08, 0xa5, 0xc3, 0xc3, 0x76, 0x9b, 0x05, 0xae, 0x4f, 0xd0, 0x0f, 0x06, 0xa0, 0xd3, 0xe3, - 0x16, 0xdd, 0xca, 0xae, 0xa0, 0xd9, 0x6f, 0x06, 0xf3, 0xf6, 0x82, 0x28, 0xcd, 0xf2, 0x0b, 0x28, - 0xc8, 0x0a, 0x47, 0xff, 0x3f, 0x67, 0xf7, 0x35, 0x9b, 0xf3, 0x15, 0xb5, 0xed, 0x1e, 0xac, 0x26, - 0x55, 0x82, 0xae, 0x65, 0xba, 0x37, 0xd1, 0x04, 0xcc, 0xeb, 0xe7, 0xd2, 0xd5, 0x87, 0x7c, 0x0d, - 0x2b, 0x3a, 0xf9, 0xd1, 0xd5, 0x39, 0xb8, 0x71, 0x19, 0x9a, 0xd7, 0xce, 0xa3, 0x3a, 0x0e, 0x23, - 0x49, 0xf2, 0xcc, 0x30, 0xa6, 0x4a, 0x28, 0x33, 0x8c, 0x53, 0x55, 0xf3, 0x1c, 0xf2, 0xa2, 0x1a, - 0xd0, 0x66, 0x06, 0x28, 0x55, 0x2e, 0x66, 0xd6, 0x75, 0x4d, 0x94, 0xd1, 0x57, 0xe2, 0x7f, 0x86, - 0x7c, 0x35, 0x34, 0x33, 0x63, 0x4e, 0x3d, 0xf3, 0xcd, 0xab, 0xe7, 0xd0, 0x1c, 0x9b, 0x57, 0xf3, - 0x31, 0xd3, 0xfc, 0xc4, 0xeb, 0x3d, 0xd3, 0xfc, 0xe4, 0xab, 0xbc, 0x5d, 0x79, 0x77, 0xbc, 0x61, - 0xfc, 0x71, 0xbc, 0x61, 0xfc, 0x7d, 0xbc, 0x61, 0x74, 0x8b, 0xf2, 0x8f, 0xf9, 0xcd, 0x7f, 0x02, - 0x00, 0x00, 0xff, 0xff, 0xa9, 0x05, 0x0f, 0x9d, 0xea, 0x10, 0x00, 0x00, +func (m *NewContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NewContainerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// LLBBridgeClient is the client API for LLBBridge service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type LLBBridgeClient interface { - // apicaps:CapResolveImage - ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) - // apicaps:CapSolveBase - Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) - // apicaps:CapReadFile - ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) - // apicaps:CapReadDir - ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) - // apicaps:CapStatFile - StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) - Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) - Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) - // apicaps:CapFrontendInputs - Inputs(ctx context.Context, in *InputsRequest, opts ...grpc.CallOption) (*InputsResponse, error) +func (m *NewContainerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewContainerRequest.Merge(m, src) } - -type lLBBridgeClient struct { - cc *grpc.ClientConn +func (m *NewContainerRequest) XXX_Size() int { + return m.Size() } - -func NewLLBBridgeClient(cc *grpc.ClientConn) LLBBridgeClient { - return &lLBBridgeClient{cc} +func (m *NewContainerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NewContainerRequest.DiscardUnknown(m) } -func (c *lLBBridgeClient) ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) { - out := new(ResolveImageConfigResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", in, out, opts...) - if err != nil { - return nil, err +var xxx_messageInfo_NewContainerRequest proto.InternalMessageInfo + +func (m *NewContainerRequest) GetContainerID() string { + if m != nil { + return m.ContainerID } - return out, nil + return "" } -func (c *lLBBridgeClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { - out := new(SolveResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Solve", in, out, opts...) - if err != nil { - return nil, err +func (m *NewContainerRequest) GetMounts() []*pb.Mount { + if m != nil { + return m.Mounts } - return out, nil + return nil } -func (c *lLBBridgeClient) ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) { - out := new(ReadFileResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", in, out, opts...) - if err != nil { - return nil, err +func (m *NewContainerRequest) GetNetwork() pb.NetMode { + if m != nil { + return m.Network } - return out, nil + return pb.NetMode_UNSET } -func (c *lLBBridgeClient) ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) { - out := new(ReadDirResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", in, out, opts...) - if err != nil { - return nil, err +func (m *NewContainerRequest) GetPlatform() *pb.Platform { + if m != nil { + return m.Platform } - return out, nil + return nil } -func (c *lLBBridgeClient) StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) { - out := new(StatFileResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/StatFile", in, out, opts...) 
- if err != nil { - return nil, err +func (m *NewContainerRequest) GetConstraints() *pb.WorkerConstraints { + if m != nil { + return m.Constraints } - return out, nil + return nil } -func (c *lLBBridgeClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) { - out := new(PongResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Ping", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +type NewContainerResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (c *lLBBridgeClient) Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) { - out := new(ReturnResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Return", in, out, opts...) - if err != nil { - return nil, err +func (m *NewContainerResponse) Reset() { *m = NewContainerResponse{} } +func (m *NewContainerResponse) String() string { return proto.CompactTextString(m) } +func (*NewContainerResponse) ProtoMessage() {} +func (*NewContainerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{23} +} +func (m *NewContainerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NewContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NewContainerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return out, nil +} +func (m *NewContainerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewContainerResponse.Merge(m, src) +} +func (m *NewContainerResponse) XXX_Size() int { + return m.Size() +} +func (m *NewContainerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NewContainerResponse.DiscardUnknown(m) } -func (c *lLBBridgeClient) Inputs(ctx context.Context, in *InputsRequest, opts ...grpc.CallOption) (*InputsResponse, error) { - out := new(InputsResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Inputs", in, out, opts...) 
- if err != nil { - return nil, err +var xxx_messageInfo_NewContainerResponse proto.InternalMessageInfo + +type ReleaseContainerRequest struct { + ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReleaseContainerRequest) Reset() { *m = ReleaseContainerRequest{} } +func (m *ReleaseContainerRequest) String() string { return proto.CompactTextString(m) } +func (*ReleaseContainerRequest) ProtoMessage() {} +func (*ReleaseContainerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{24} +} +func (m *ReleaseContainerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReleaseContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReleaseContainerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return out, nil +} +func (m *ReleaseContainerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReleaseContainerRequest.Merge(m, src) +} +func (m *ReleaseContainerRequest) XXX_Size() int { + return m.Size() +} +func (m *ReleaseContainerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReleaseContainerRequest.DiscardUnknown(m) } -// LLBBridgeServer is the server API for LLBBridge service. -type LLBBridgeServer interface { - // apicaps:CapResolveImage - ResolveImageConfig(context.Context, *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error) - // apicaps:CapSolveBase - Solve(context.Context, *SolveRequest) (*SolveResponse, error) - // apicaps:CapReadFile - ReadFile(context.Context, *ReadFileRequest) (*ReadFileResponse, error) - // apicaps:CapReadDir - ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error) - // apicaps:CapStatFile - StatFile(context.Context, *StatFileRequest) (*StatFileResponse, error) - Ping(context.Context, *PingRequest) (*PongResponse, error) - Return(context.Context, *ReturnRequest) (*ReturnResponse, error) - // apicaps:CapFrontendInputs - Inputs(context.Context, *InputsRequest) (*InputsResponse, error) +var xxx_messageInfo_ReleaseContainerRequest proto.InternalMessageInfo + +func (m *ReleaseContainerRequest) GetContainerID() string { + if m != nil { + return m.ContainerID + } + return "" } -// UnimplementedLLBBridgeServer can be embedded to have forward compatible implementations. 
-type UnimplementedLLBBridgeServer struct { +type ReleaseContainerResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (*UnimplementedLLBBridgeServer) ResolveImageConfig(ctx context.Context, req *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ResolveImageConfig not implemented") +func (m *ReleaseContainerResponse) Reset() { *m = ReleaseContainerResponse{} } +func (m *ReleaseContainerResponse) String() string { return proto.CompactTextString(m) } +func (*ReleaseContainerResponse) ProtoMessage() {} +func (*ReleaseContainerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{25} } -func (*UnimplementedLLBBridgeServer) Solve(ctx context.Context, req *SolveRequest) (*SolveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Solve not implemented") +func (m *ReleaseContainerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (*UnimplementedLLBBridgeServer) ReadFile(ctx context.Context, req *ReadFileRequest) (*ReadFileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReadFile not implemented") +func (m *ReleaseContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReleaseContainerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (*UnimplementedLLBBridgeServer) ReadDir(ctx context.Context, req *ReadDirRequest) (*ReadDirResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReadDir not implemented") +func (m *ReleaseContainerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReleaseContainerResponse.Merge(m, src) } -func (*UnimplementedLLBBridgeServer) StatFile(ctx context.Context, req *StatFileRequest) (*StatFileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StatFile not implemented") +func (m *ReleaseContainerResponse) XXX_Size() int { + return m.Size() } -func (*UnimplementedLLBBridgeServer) Ping(ctx context.Context, req *PingRequest) (*PongResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +func (m *ReleaseContainerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReleaseContainerResponse.DiscardUnknown(m) } -func (*UnimplementedLLBBridgeServer) Return(ctx context.Context, req *ReturnRequest) (*ReturnResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Return not implemented") + +var xxx_messageInfo_ReleaseContainerResponse proto.InternalMessageInfo + +type ExecMessage struct { + ProcessID string `protobuf:"bytes,1,opt,name=ProcessID,proto3" json:"ProcessID,omitempty"` + // Types that are valid to be assigned to Input: + // *ExecMessage_Init + // *ExecMessage_File + // *ExecMessage_Resize + // *ExecMessage_Started + // *ExecMessage_Exit + // *ExecMessage_Done + Input isExecMessage_Input `protobuf_oneof:"Input"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (*UnimplementedLLBBridgeServer) Inputs(ctx context.Context, req *InputsRequest) (*InputsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Inputs not implemented") + +func (m *ExecMessage) Reset() { *m = ExecMessage{} } +func (m *ExecMessage) String() string { 
return proto.CompactTextString(m) } +func (*ExecMessage) ProtoMessage() {} +func (*ExecMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{26} +} +func (m *ExecMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExecMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExecMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecMessage.Merge(m, src) +} +func (m *ExecMessage) XXX_Size() int { + return m.Size() +} +func (m *ExecMessage) XXX_DiscardUnknown() { + xxx_messageInfo_ExecMessage.DiscardUnknown(m) } -func RegisterLLBBridgeServer(s *grpc.Server, srv LLBBridgeServer) { - s.RegisterService(&_LLBBridge_serviceDesc, srv) +var xxx_messageInfo_ExecMessage proto.InternalMessageInfo + +type isExecMessage_Input interface { + isExecMessage_Input() + MarshalTo([]byte) (int, error) + Size() int } -func _LLBBridge_ResolveImageConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResolveImageConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).ResolveImageConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).ResolveImageConfig(ctx, req.(*ResolveImageConfigRequest)) - } - return interceptor(ctx, in, info, handler) +type ExecMessage_Init struct { + Init *InitMessage `protobuf:"bytes,2,opt,name=Init,proto3,oneof" json:"Init,omitempty"` +} +type ExecMessage_File struct { + File *FdMessage `protobuf:"bytes,3,opt,name=File,proto3,oneof" json:"File,omitempty"` +} +type ExecMessage_Resize struct { + Resize *ResizeMessage `protobuf:"bytes,4,opt,name=Resize,proto3,oneof" json:"Resize,omitempty"` +} +type ExecMessage_Started struct { + Started *StartedMessage `protobuf:"bytes,5,opt,name=Started,proto3,oneof" json:"Started,omitempty"` +} +type ExecMessage_Exit struct { + Exit *ExitMessage `protobuf:"bytes,6,opt,name=Exit,proto3,oneof" json:"Exit,omitempty"` +} +type ExecMessage_Done struct { + Done *DoneMessage `protobuf:"bytes,7,opt,name=Done,proto3,oneof" json:"Done,omitempty"` } -func _LLBBridge_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SolveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).Solve(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Solve", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Solve(ctx, req.(*SolveRequest)) +func (*ExecMessage_Init) isExecMessage_Input() {} +func (*ExecMessage_File) isExecMessage_Input() {} +func (*ExecMessage_Resize) isExecMessage_Input() {} +func (*ExecMessage_Started) isExecMessage_Input() {} +func (*ExecMessage_Exit) isExecMessage_Input() {} +func (*ExecMessage_Done) isExecMessage_Input() {} + +func (m *ExecMessage) GetInput() isExecMessage_Input { + if m != nil { + 
return m.Input } - return interceptor(ctx, in, info, handler) + return nil } -func _LLBBridge_ReadFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadFileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).ReadFile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).ReadFile(ctx, req.(*ReadFileRequest)) +func (m *ExecMessage) GetProcessID() string { + if m != nil { + return m.ProcessID } - return interceptor(ctx, in, info, handler) + return "" } -func _LLBBridge_ReadDir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadDirRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).ReadDir(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", +func (m *ExecMessage) GetInit() *InitMessage { + if x, ok := m.GetInput().(*ExecMessage_Init); ok { + return x.Init } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).ReadDir(ctx, req.(*ReadDirRequest)) - } - return interceptor(ctx, in, info, handler) + return nil } -func _LLBBridge_StatFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatFileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).StatFile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/StatFile", +func (m *ExecMessage) GetFile() *FdMessage { + if x, ok := m.GetInput().(*ExecMessage_File); ok { + return x.File } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).StatFile(ctx, req.(*StatFileRequest)) - } - return interceptor(ctx, in, info, handler) + return nil } -func _LLBBridge_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).Ping(ctx, in) +func (m *ExecMessage) GetResize() *ResizeMessage { + if x, ok := m.GetInput().(*ExecMessage_Resize); ok { + return x.Resize } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Ping", + return nil +} + +func (m *ExecMessage) GetStarted() *StartedMessage { + if x, ok := m.GetInput().(*ExecMessage_Started); ok { + return x.Started } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Ping(ctx, req.(*PingRequest)) + return nil +} + +func (m *ExecMessage) GetExit() *ExitMessage { + if x, ok := m.GetInput().(*ExecMessage_Exit); ok { + return x.Exit } - return interceptor(ctx, in, info, handler) + return nil } -func _LLBBridge_Return_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(ReturnRequest) - if err := dec(in); err != nil { - return nil, err +func (m *ExecMessage) GetDone() *DoneMessage { + if x, ok := m.GetInput().(*ExecMessage_Done); ok { + return x.Done } - if interceptor == nil { - return srv.(LLBBridgeServer).Return(ctx, in) + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ExecMessage) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ExecMessage_Init)(nil), + (*ExecMessage_File)(nil), + (*ExecMessage_Resize)(nil), + (*ExecMessage_Started)(nil), + (*ExecMessage_Exit)(nil), + (*ExecMessage_Done)(nil), } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Return", +} + +type InitMessage struct { + ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` + Meta *pb.Meta `protobuf:"bytes,2,opt,name=Meta,proto3" json:"Meta,omitempty"` + Fds []uint32 `protobuf:"varint,3,rep,packed,name=Fds,proto3" json:"Fds,omitempty"` + Tty bool `protobuf:"varint,4,opt,name=Tty,proto3" json:"Tty,omitempty"` + Security pb.SecurityMode `protobuf:"varint,5,opt,name=Security,proto3,enum=pb.SecurityMode" json:"Security,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitMessage) Reset() { *m = InitMessage{} } +func (m *InitMessage) String() string { return proto.CompactTextString(m) } +func (*InitMessage) ProtoMessage() {} +func (*InitMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{27} +} +func (m *InitMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *InitMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_InitMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Return(ctx, req.(*ReturnRequest)) +} +func (m *InitMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitMessage.Merge(m, src) +} +func (m *InitMessage) XXX_Size() int { + return m.Size() +} +func (m *InitMessage) XXX_DiscardUnknown() { + xxx_messageInfo_InitMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_InitMessage proto.InternalMessageInfo + +func (m *InitMessage) GetContainerID() string { + if m != nil { + return m.ContainerID } - return interceptor(ctx, in, info, handler) + return "" } -func _LLBBridge_Inputs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(InputsRequest) - if err := dec(in); err != nil { - return nil, err +func (m *InitMessage) GetMeta() *pb.Meta { + if m != nil { + return m.Meta } - if interceptor == nil { - return srv.(LLBBridgeServer).Inputs(ctx, in) + return nil +} + +func (m *InitMessage) GetFds() []uint32 { + if m != nil { + return m.Fds } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Inputs", + return nil +} + +func (m *InitMessage) GetTty() bool { + if m != nil { + return m.Tty } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Inputs(ctx, req.(*InputsRequest)) + return false +} + +func (m *InitMessage) GetSecurity() pb.SecurityMode { + if m != nil { + return m.Security } - return 
interceptor(ctx, in, info, handler) + return pb.SecurityMode_SANDBOX } -var _LLBBridge_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.buildkit.v1.frontend.LLBBridge", - HandlerType: (*LLBBridgeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ResolveImageConfig", - Handler: _LLBBridge_ResolveImageConfig_Handler, - }, - { - MethodName: "Solve", - Handler: _LLBBridge_Solve_Handler, - }, - { - MethodName: "ReadFile", - Handler: _LLBBridge_ReadFile_Handler, - }, - { - MethodName: "ReadDir", - Handler: _LLBBridge_ReadDir_Handler, - }, - { - MethodName: "StatFile", - Handler: _LLBBridge_StatFile_Handler, - }, - { - MethodName: "Ping", - Handler: _LLBBridge_Ping_Handler, - }, - { - MethodName: "Return", - Handler: _LLBBridge_Return_Handler, - }, - { - MethodName: "Inputs", - Handler: _LLBBridge_Inputs_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "gateway.proto", +type ExitMessage struct { + Code uint32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` + Error *rpc.Status `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Result) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *ExitMessage) Reset() { *m = ExitMessage{} } +func (m *ExitMessage) String() string { return proto.CompactTextString(m) } +func (*ExitMessage) ProtoMessage() {} +func (*ExitMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{28} +} +func (m *ExitMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExitMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExitMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil +} +func (m *ExitMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExitMessage.Merge(m, src) +} +func (m *ExitMessage) XXX_Size() int { + return m.Size() +} +func (m *ExitMessage) XXX_DiscardUnknown() { + xxx_messageInfo_ExitMessage.DiscardUnknown(m) } -func (m *Result) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var xxx_messageInfo_ExitMessage proto.InternalMessageInfo + +func (m *ExitMessage) GetCode() uint32 { + if m != nil { + return m.Code + } + return 0 } -func (m *Result) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (m *ExitMessage) GetError() *rpc.Status { + if m != nil { + return m.Error } - if len(m.Metadata) > 0 { - for k := range m.Metadata { - v := m.Metadata[k] - baseI := i - if len(v) > 0 { - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x52 - } - } - if m.Result != nil { - { - size := m.Result.Size() - i -= size - if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil + return nil } -func (m *Result_RefDeprecated) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type StartedMessage struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Result_RefDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= len(m.RefDeprecated) - copy(dAtA[i:], m.RefDeprecated) - i = encodeVarintGateway(dAtA, i, uint64(len(m.RefDeprecated))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil +func (m *StartedMessage) Reset() { *m = StartedMessage{} } +func (m *StartedMessage) String() string { return proto.CompactTextString(m) } +func (*StartedMessage) ProtoMessage() {} +func (*StartedMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{29} } -func (m *Result_RefsDeprecated) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *StartedMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *Result_RefsDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.RefsDeprecated != nil { - { - size, err := m.RefsDeprecated.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) +func (m *StartedMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartedMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x12 + return b[:n], nil } - return len(dAtA) - i, nil } -func (m *Result_Ref) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *StartedMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartedMessage.Merge(m, src) +} +func (m *StartedMessage) XXX_Size() int { + return m.Size() +} +func (m *StartedMessage) XXX_DiscardUnknown() { + xxx_messageInfo_StartedMessage.DiscardUnknown(m) } -func (m *Result_Ref) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Ref != nil { - { - size, err := m.Ref.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) +var xxx_messageInfo_StartedMessage proto.InternalMessageInfo + +type DoneMessage struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoneMessage) Reset() { *m = DoneMessage{} } +func (m *DoneMessage) String() string { return proto.CompactTextString(m) } +func (*DoneMessage) ProtoMessage() {} +func (*DoneMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{30} +} +func (m *DoneMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoneMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoneMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x1a + return b[:n], nil } - return len(dAtA) - i, nil } -func (m *Result_Refs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *DoneMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoneMessage.Merge(m, src) +} +func (m *DoneMessage) XXX_Size() int { + return m.Size() +} +func 
(m *DoneMessage) XXX_DiscardUnknown() { + xxx_messageInfo_DoneMessage.DiscardUnknown(m) } -func (m *Result_Refs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Refs != nil { - { - size, err := m.Refs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) +var xxx_messageInfo_DoneMessage proto.InternalMessageInfo + +type FdMessage struct { + Fd uint32 `protobuf:"varint,1,opt,name=Fd,proto3" json:"Fd,omitempty"` + EOF bool `protobuf:"varint,2,opt,name=EOF,proto3" json:"EOF,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=Data,proto3" json:"Data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FdMessage) Reset() { *m = FdMessage{} } +func (m *FdMessage) String() string { return proto.CompactTextString(m) } +func (*FdMessage) ProtoMessage() {} +func (*FdMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{31} +} +func (m *FdMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FdMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FdMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x22 + return b[:n], nil } - return len(dAtA) - i, nil } -func (m *RefMapDeprecated) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *FdMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_FdMessage.Merge(m, src) } - -func (m *RefMapDeprecated) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *FdMessage) XXX_Size() int { + return m.Size() +} +func (m *FdMessage) XXX_DiscardUnknown() { + xxx_messageInfo_FdMessage.DiscardUnknown(m) } -func (m *RefMapDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +var xxx_messageInfo_FdMessage proto.InternalMessageInfo + +func (m *FdMessage) GetFd() uint32 { + if m != nil { + return m.Fd } - if len(m.Refs) > 0 { - for k := range m.Refs { - v := m.Refs[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + return 0 +} + +func (m *FdMessage) GetEOF() bool { + if m != nil { + return m.EOF } - return len(dAtA) - i, nil + return false } -func (m *Ref) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *FdMessage) GetData() []byte { + if m != nil { + return m.Data } - return dAtA[:n], nil + return nil } -func (m *Ref) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type ResizeMessage struct { + Rows uint32 `protobuf:"varint,1,opt,name=Rows,proto3" json:"Rows,omitempty"` + Cols uint32 `protobuf:"varint,2,opt,name=Cols,proto3" json:"Cols,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Ref) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Def != nil { - { - size, err := m.Def.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) +func (m *ResizeMessage) Reset() { *m = ResizeMessage{} } +func (m *ResizeMessage) String() string { return proto.CompactTextString(m) } +func (*ResizeMessage) ProtoMessage() {} +func (*ResizeMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{32} +} +func (m *ResizeMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResizeMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResizeMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *ResizeMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResizeMessage.Merge(m, src) +} +func (m *ResizeMessage) XXX_Size() int { + return m.Size() +} +func (m *ResizeMessage) XXX_DiscardUnknown() { + xxx_messageInfo_ResizeMessage.DiscardUnknown(m) } -func (m *RefMap) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_ResizeMessage proto.InternalMessageInfo + +func (m *ResizeMessage) GetRows() uint32 { + if m != nil { + return m.Rows } - return dAtA[:n], nil + return 0 } -func (m *RefMap) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ResizeMessage) GetCols() uint32 { + if m != nil { + return m.Cols + } + return 0 } -func (m *RefMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Refs) > 0 { - for k := range m.Refs { - v := m.Refs[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil +func init() { + proto.RegisterType((*Result)(nil), "moby.buildkit.v1.frontend.Result") + proto.RegisterMapType((map[string][]byte)(nil), "moby.buildkit.v1.frontend.Result.MetadataEntry") + proto.RegisterType((*RefMapDeprecated)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated.RefsEntry") + proto.RegisterType((*Ref)(nil), "moby.buildkit.v1.frontend.Ref") + proto.RegisterType((*RefMap)(nil), "moby.buildkit.v1.frontend.RefMap") + proto.RegisterMapType((map[string]*Ref)(nil), "moby.buildkit.v1.frontend.RefMap.RefsEntry") + 
proto.RegisterType((*ReturnRequest)(nil), "moby.buildkit.v1.frontend.ReturnRequest") + proto.RegisterType((*ReturnResponse)(nil), "moby.buildkit.v1.frontend.ReturnResponse") + proto.RegisterType((*InputsRequest)(nil), "moby.buildkit.v1.frontend.InputsRequest") + proto.RegisterType((*InputsResponse)(nil), "moby.buildkit.v1.frontend.InputsResponse") + proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.frontend.InputsResponse.DefinitionsEntry") + proto.RegisterType((*ResolveImageConfigRequest)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigRequest") + proto.RegisterType((*ResolveImageConfigResponse)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigResponse") + proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.frontend.SolveRequest") + proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.frontend.SolveRequest.FrontendInputsEntry") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.SolveRequest.FrontendOptEntry") + proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.frontend.CacheOptionsEntry") + proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.CacheOptionsEntry.AttrsEntry") + proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.frontend.SolveResponse") + proto.RegisterType((*ReadFileRequest)(nil), "moby.buildkit.v1.frontend.ReadFileRequest") + proto.RegisterType((*FileRange)(nil), "moby.buildkit.v1.frontend.FileRange") + proto.RegisterType((*ReadFileResponse)(nil), "moby.buildkit.v1.frontend.ReadFileResponse") + proto.RegisterType((*ReadDirRequest)(nil), "moby.buildkit.v1.frontend.ReadDirRequest") + proto.RegisterType((*ReadDirResponse)(nil), "moby.buildkit.v1.frontend.ReadDirResponse") + proto.RegisterType((*StatFileRequest)(nil), "moby.buildkit.v1.frontend.StatFileRequest") + proto.RegisterType((*StatFileResponse)(nil), "moby.buildkit.v1.frontend.StatFileResponse") + proto.RegisterType((*PingRequest)(nil), "moby.buildkit.v1.frontend.PingRequest") + proto.RegisterType((*PongResponse)(nil), "moby.buildkit.v1.frontend.PongResponse") + proto.RegisterType((*NewContainerRequest)(nil), "moby.buildkit.v1.frontend.NewContainerRequest") + proto.RegisterType((*NewContainerResponse)(nil), "moby.buildkit.v1.frontend.NewContainerResponse") + proto.RegisterType((*ReleaseContainerRequest)(nil), "moby.buildkit.v1.frontend.ReleaseContainerRequest") + proto.RegisterType((*ReleaseContainerResponse)(nil), "moby.buildkit.v1.frontend.ReleaseContainerResponse") + proto.RegisterType((*ExecMessage)(nil), "moby.buildkit.v1.frontend.ExecMessage") + proto.RegisterType((*InitMessage)(nil), "moby.buildkit.v1.frontend.InitMessage") + proto.RegisterType((*ExitMessage)(nil), "moby.buildkit.v1.frontend.ExitMessage") + proto.RegisterType((*StartedMessage)(nil), "moby.buildkit.v1.frontend.StartedMessage") + proto.RegisterType((*DoneMessage)(nil), "moby.buildkit.v1.frontend.DoneMessage") + proto.RegisterType((*FdMessage)(nil), "moby.buildkit.v1.frontend.FdMessage") + proto.RegisterType((*ResizeMessage)(nil), "moby.buildkit.v1.frontend.ResizeMessage") } -func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } + +var fileDescriptor_f1a937782ebbded5 = []byte{ + // 1899 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xcf, 0x6f, 0x1b, 0xc7, + 0xf5, 0xd7, 
0x8a, 0xa4, 0x48, 0x3e, 0x92, 0x32, 0x33, 0xce, 0x37, 0x5f, 0x7a, 0x11, 0x38, 0xcc, + 0x22, 0x55, 0x69, 0x47, 0x59, 0xa6, 0x74, 0x02, 0xb9, 0x72, 0x90, 0xd4, 0xd4, 0x0f, 0x58, 0x8d, + 0x24, 0xab, 0xe3, 0x14, 0x06, 0x82, 0x14, 0xe8, 0x8a, 0x3b, 0xa4, 0x17, 0xa6, 0x76, 0xb7, 0xb3, + 0x43, 0xcb, 0x4c, 0x2e, 0xed, 0xad, 0xf7, 0x02, 0xbd, 0x16, 0xe8, 0x5f, 0xd0, 0x4b, 0xaf, 0x3d, + 0xe7, 0xd8, 0x63, 0xd1, 0x43, 0x50, 0x18, 0xfd, 0x3f, 0x5a, 0xbc, 0xf9, 0x41, 0x2e, 0x29, 0x6a, + 0x29, 0xa2, 0x27, 0xce, 0xbc, 0x7d, 0x9f, 0x37, 0x6f, 0x3e, 0xf3, 0xe6, 0xbd, 0x37, 0x84, 0xda, + 0xc0, 0x13, 0xec, 0xd2, 0x1b, 0xbb, 0x31, 0x8f, 0x44, 0x44, 0xee, 0x5c, 0x44, 0xe7, 0x63, 0xf7, + 0x7c, 0x14, 0x0c, 0xfd, 0x97, 0x81, 0x70, 0x5f, 0xfd, 0xc4, 0xed, 0xf3, 0x28, 0x14, 0x2c, 0xf4, + 0xed, 0x8f, 0x06, 0x81, 0x78, 0x31, 0x3a, 0x77, 0x7b, 0xd1, 0x45, 0x7b, 0x10, 0x0d, 0xa2, 0xb6, + 0x44, 0x9c, 0x8f, 0xfa, 0x72, 0x26, 0x27, 0x72, 0xa4, 0x2c, 0xd9, 0x9d, 0x79, 0xf5, 0x41, 0x14, + 0x0d, 0x86, 0xcc, 0x8b, 0x83, 0x44, 0x0f, 0xdb, 0x3c, 0xee, 0xb5, 0x13, 0xe1, 0x89, 0x51, 0xa2, + 0x31, 0xdb, 0x29, 0x0c, 0x3a, 0xd2, 0x36, 0x8e, 0xb4, 0x93, 0x68, 0xf8, 0x8a, 0xf1, 0x76, 0x7c, + 0xde, 0x8e, 0x62, 0xa3, 0xdd, 0xbe, 0x56, 0xdb, 0x8b, 0x83, 0xb6, 0x18, 0xc7, 0x2c, 0x69, 0x5f, + 0x46, 0xfc, 0x25, 0xe3, 0x1a, 0xf0, 0xe0, 0x5a, 0xc0, 0x48, 0x04, 0x43, 0x44, 0xf5, 0xbc, 0x38, + 0xc1, 0x45, 0xf0, 0x57, 0x83, 0xd2, 0xdb, 0x16, 0x51, 0x18, 0x24, 0x22, 0x08, 0x06, 0x41, 0xbb, + 0x9f, 0x48, 0x8c, 0x5a, 0x05, 0x37, 0xa1, 0xd4, 0x9d, 0xdf, 0xe7, 0x60, 0x83, 0xb2, 0x64, 0x34, + 0x14, 0x64, 0x0b, 0x6a, 0x9c, 0xf5, 0xf7, 0x59, 0xcc, 0x59, 0xcf, 0x13, 0xcc, 0x6f, 0x58, 0x4d, + 0xab, 0x55, 0x7e, 0xb2, 0x46, 0x67, 0xc5, 0xe4, 0x97, 0xb0, 0xc9, 0x59, 0x3f, 0x49, 0x29, 0xae, + 0x37, 0xad, 0x56, 0xa5, 0xf3, 0xa1, 0x7b, 0xed, 0x61, 0xb8, 0x94, 0xf5, 0x4f, 0xbc, 0x78, 0x0a, + 0x79, 0xb2, 0x46, 0xe7, 0x8c, 0x90, 0x0e, 0xe4, 0x38, 0xeb, 0x37, 0x72, 0xd2, 0xd6, 0xdd, 0x6c, + 0x5b, 0x4f, 0xd6, 0x28, 0x2a, 0x93, 0x1d, 0xc8, 0xa3, 0x95, 0x46, 0x5e, 0x82, 0xde, 0x5f, 0xea, + 0xc0, 0x93, 0x35, 0x2a, 0x01, 0xe4, 0x4b, 0x28, 0x5d, 0x30, 0xe1, 0xf9, 0x9e, 0xf0, 0x1a, 0xd0, + 0xcc, 0xb5, 0x2a, 0x9d, 0x76, 0x26, 0x18, 0x09, 0x72, 0x4f, 0x34, 0xe2, 0x20, 0x14, 0x7c, 0x4c, + 0x27, 0x06, 0xec, 0x47, 0x50, 0x9b, 0xf9, 0x44, 0xea, 0x90, 0x7b, 0xc9, 0xc6, 0x8a, 0x3f, 0x8a, + 0x43, 0xf2, 0x36, 0x14, 0x5e, 0x79, 0xc3, 0x11, 0x93, 0x54, 0x55, 0xa9, 0x9a, 0xec, 0xae, 0x3f, + 0xb4, 0xba, 0x25, 0xd8, 0xe0, 0xd2, 0xbc, 0xf3, 0x47, 0x0b, 0xea, 0xf3, 0x3c, 0x91, 0x23, 0xbd, + 0x43, 0x4b, 0x3a, 0xf9, 0xe9, 0x0a, 0x14, 0xa3, 0x20, 0x51, 0xae, 0x4a, 0x13, 0xf6, 0x0e, 0x94, + 0x27, 0xa2, 0x65, 0x2e, 0x96, 0x53, 0x2e, 0x3a, 0x3b, 0x90, 0xa3, 0xac, 0x4f, 0x36, 0x61, 0x3d, + 0xd0, 0x41, 0x41, 0xd7, 0x03, 0x9f, 0x34, 0x21, 0xe7, 0xb3, 0xbe, 0x3e, 0xfc, 0x4d, 0x37, 0x3e, + 0x77, 0xf7, 0x59, 0x3f, 0x08, 0x03, 0x11, 0x44, 0x21, 0xc5, 0x4f, 0xce, 0x9f, 0x2d, 0x0c, 0x2e, + 0x74, 0x8b, 0x7c, 0x31, 0xb3, 0x8f, 0xe5, 0xa1, 0x72, 0xc5, 0xfb, 0xe7, 0xd9, 0xde, 0x7f, 0x92, + 0xf6, 0x7e, 0x69, 0xfc, 0xa4, 0x77, 0x27, 0xa0, 0x46, 0x99, 0x18, 0xf1, 0x90, 0xb2, 0xdf, 0x8c, + 0x58, 0x22, 0xc8, 0x4f, 0xcd, 0x89, 0x48, 0xfb, 0xcb, 0xc2, 0x0a, 0x15, 0xa9, 0x06, 0x90, 0x16, + 0x14, 0x18, 0xe7, 0x11, 0xd7, 0x5e, 0x10, 0x57, 0x65, 0x0e, 0x97, 0xc7, 0x3d, 0xf7, 0x99, 0xcc, + 0x1c, 0x54, 0x29, 0x38, 0x75, 0xd8, 0x34, 0xab, 0x26, 0x71, 0x14, 0x26, 0xcc, 0xb9, 0x05, 0xb5, + 0xa3, 0x30, 0x1e, 0x89, 0x44, 0xfb, 0xe1, 0xfc, 0xcd, 0x82, 0x4d, 0x23, 0x51, 0x3a, 0xe4, 0x1b, + 0xa8, 0x4c, 0x39, 0x36, 0x64, 0xee, 
0x66, 0xf8, 0x37, 0x8b, 0x4f, 0x1d, 0x90, 0xe6, 0x36, 0x6d, + 0xce, 0x3e, 0x85, 0xfa, 0xbc, 0xc2, 0x02, 0xa6, 0x3f, 0x98, 0x65, 0x7a, 0xfe, 0xe0, 0x53, 0xcc, + 0xfe, 0xc1, 0x82, 0x3b, 0x94, 0xc9, 0x54, 0x78, 0x74, 0xe1, 0x0d, 0xd8, 0x5e, 0x14, 0xf6, 0x83, + 0x81, 0xa1, 0xb9, 0x2e, 0xa3, 0xca, 0x58, 0xc6, 0x00, 0x6b, 0x41, 0xe9, 0x6c, 0xe8, 0x89, 0x7e, + 0xc4, 0x2f, 0xb4, 0xf1, 0x2a, 0x1a, 0x37, 0x32, 0x3a, 0xf9, 0x4a, 0x9a, 0x50, 0xd1, 0x86, 0x4f, + 0x22, 0x9f, 0xc9, 0x9c, 0x51, 0xa6, 0x69, 0x11, 0x69, 0x40, 0xf1, 0x38, 0x1a, 0x9c, 0x7a, 0x17, + 0x4c, 0x26, 0x87, 0x32, 0x35, 0x53, 0xe7, 0xb7, 0x16, 0xd8, 0x8b, 0xbc, 0xd2, 0x14, 0xff, 0x1c, + 0x36, 0xf6, 0x83, 0x01, 0x4b, 0xd4, 0xe9, 0x97, 0xbb, 0x9d, 0xef, 0x7f, 0x78, 0x6f, 0xed, 0x9f, + 0x3f, 0xbc, 0x77, 0x3f, 0x95, 0x57, 0xa3, 0x98, 0x85, 0xbd, 0x28, 0x14, 0x5e, 0x10, 0x32, 0x8e, + 0xe5, 0xe1, 0x23, 0x5f, 0x42, 0x5c, 0x85, 0xa4, 0xda, 0x02, 0x79, 0x07, 0x36, 0x94, 0x75, 0x7d, + 0xed, 0xf5, 0xcc, 0xf9, 0x47, 0x01, 0xaa, 0xcf, 0xd0, 0x01, 0xc3, 0x85, 0x0b, 0x30, 0xa5, 0x50, + 0x87, 0xdd, 0x3c, 0xb1, 0x29, 0x0d, 0x62, 0x43, 0xe9, 0x50, 0x1f, 0xb1, 0xbe, 0xae, 0x93, 0x39, + 0xf9, 0x1a, 0x2a, 0x66, 0xfc, 0x34, 0x16, 0x8d, 0x9c, 0x8c, 0x91, 0x87, 0x19, 0x31, 0x92, 0xf6, + 0xc4, 0x4d, 0x41, 0x75, 0x84, 0xa4, 0x24, 0xe4, 0x33, 0xb8, 0x73, 0x74, 0x11, 0x47, 0x5c, 0xec, + 0x79, 0xbd, 0x17, 0x8c, 0xce, 0x56, 0x81, 0x7c, 0x33, 0xd7, 0x2a, 0xd3, 0xeb, 0x15, 0xc8, 0x36, + 0xbc, 0xe5, 0x0d, 0x87, 0xd1, 0xa5, 0xbe, 0x34, 0x32, 0xfc, 0x1b, 0x85, 0xa6, 0xd5, 0x2a, 0xd1, + 0xab, 0x1f, 0xc8, 0xc7, 0x70, 0x3b, 0x25, 0x7c, 0xcc, 0xb9, 0x37, 0xc6, 0x78, 0xd9, 0x90, 0xfa, + 0x8b, 0x3e, 0x61, 0x06, 0x3b, 0x0c, 0x42, 0x6f, 0xd8, 0x00, 0xa9, 0xa3, 0x26, 0xc4, 0x81, 0xea, + 0xc1, 0x6b, 0x74, 0x89, 0xf1, 0xc7, 0x42, 0xf0, 0x46, 0x45, 0x1e, 0xc5, 0x8c, 0x8c, 0x9c, 0x41, + 0x55, 0x3a, 0xac, 0x7c, 0x4f, 0x1a, 0x55, 0x49, 0xda, 0x76, 0x06, 0x69, 0x52, 0xfd, 0x69, 0x9c, + 0xba, 0x4a, 0x33, 0x16, 0x48, 0x0f, 0x36, 0x0d, 0x71, 0xea, 0x0e, 0x36, 0x6a, 0xd2, 0xe6, 0xa3, + 0x55, 0x0f, 0x42, 0xa1, 0xd5, 0x12, 0x73, 0x26, 0xed, 0xcf, 0xa1, 0x3e, 0x7f, 0x5e, 0xab, 0x24, + 0x76, 0xfb, 0x17, 0x70, 0x7b, 0xc1, 0x32, 0xff, 0xd3, 0x9d, 0xff, 0x8b, 0x05, 0x6f, 0x5d, 0xe1, + 0x86, 0x10, 0xc8, 0x7f, 0x35, 0x8e, 0x99, 0x36, 0x29, 0xc7, 0xe4, 0x04, 0x0a, 0xc8, 0x7d, 0xd2, + 0x58, 0x97, 0xc4, 0xec, 0xac, 0x42, 0xb6, 0x2b, 0x91, 0x8a, 0x14, 0x65, 0xc5, 0x7e, 0x08, 0x30, + 0x15, 0xae, 0x54, 0xde, 0xbe, 0x81, 0x9a, 0x66, 0x5e, 0xa7, 0x80, 0xba, 0xea, 0x44, 0x34, 0x18, + 0xfb, 0x8c, 0x69, 0x49, 0xc8, 0xad, 0x58, 0x12, 0x9c, 0xef, 0xe0, 0x16, 0x65, 0x9e, 0x7f, 0x18, + 0x0c, 0xd9, 0xf5, 0x99, 0x0f, 0xef, 0x73, 0x30, 0x64, 0x67, 0x9e, 0x78, 0x31, 0xb9, 0xcf, 0x7a, + 0x4e, 0x76, 0xa1, 0x40, 0xbd, 0x70, 0xc0, 0xf4, 0xd2, 0x1f, 0x64, 0x2c, 0x2d, 0x17, 0x41, 0x5d, + 0xaa, 0x20, 0xce, 0x23, 0x28, 0x4f, 0x64, 0x98, 0x8d, 0x9e, 0xf6, 0xfb, 0x09, 0x53, 0x99, 0x2d, + 0x47, 0xf5, 0x0c, 0xe5, 0xc7, 0x2c, 0x1c, 0xe8, 0xa5, 0x73, 0x54, 0xcf, 0x9c, 0x2d, 0x6c, 0x47, + 0x8c, 0xe7, 0x9a, 0x1a, 0x02, 0xf9, 0x7d, 0xec, 0x99, 0x2c, 0x79, 0x89, 0xe4, 0xd8, 0xf1, 0xb1, + 0x94, 0x79, 0xfe, 0x7e, 0xc0, 0xaf, 0xdf, 0x60, 0x03, 0x8a, 0xfb, 0x01, 0x4f, 0xed, 0xcf, 0x4c, + 0xc9, 0x16, 0x16, 0xb9, 0xde, 0x70, 0xe4, 0xe3, 0x6e, 0x05, 0xe3, 0xa1, 0xce, 0xe6, 0x73, 0x52, + 0xe7, 0x0b, 0xc5, 0xa3, 0x5c, 0x45, 0x3b, 0xb3, 0x0d, 0x45, 0x16, 0x0a, 0x1e, 0x30, 0x53, 0x09, + 0x89, 0xab, 0xda, 0x5c, 0x57, 0xb6, 0xb9, 0xb2, 0xe2, 0x52, 0xa3, 0xe2, 0xec, 0xc0, 0x2d, 0x14, + 0x64, 0x1f, 0x04, 0x81, 0x7c, 0xca, 0x49, 0x39, 0x76, 0x76, 
0xa1, 0x3e, 0x05, 0xea, 0xa5, 0xb7, + 0x20, 0x8f, 0x4d, 0xb4, 0x4e, 0xd5, 0x8b, 0xd6, 0x95, 0xdf, 0x9d, 0x1a, 0x54, 0xce, 0x82, 0xd0, + 0xd4, 0x3c, 0xe7, 0x8d, 0x05, 0xd5, 0xb3, 0x28, 0x9c, 0x56, 0x9b, 0x33, 0xb8, 0x65, 0x6e, 0xe0, + 0xe3, 0xb3, 0xa3, 0x3d, 0x2f, 0x36, 0x5b, 0x69, 0x5e, 0x3d, 0x66, 0xdd, 0xef, 0xbb, 0x4a, 0xb1, + 0x9b, 0xc7, 0xc2, 0x44, 0xe7, 0xe1, 0xe4, 0x67, 0x50, 0x3c, 0x3e, 0xee, 0x4a, 0x4b, 0xeb, 0x2b, + 0x59, 0x32, 0x30, 0xf2, 0x39, 0x14, 0x9f, 0xcb, 0x67, 0x48, 0xa2, 0x8b, 0xc7, 0x82, 0x90, 0x53, + 0x1b, 0x55, 0x6a, 0x94, 0xf5, 0x22, 0xee, 0x53, 0x03, 0x72, 0xfe, 0x6d, 0xc1, 0xed, 0x53, 0x76, + 0xb9, 0x67, 0x0a, 0xa4, 0x61, 0xbb, 0x09, 0x95, 0x89, 0xec, 0x68, 0x5f, 0xb3, 0x9e, 0x16, 0x91, + 0xf7, 0x61, 0xe3, 0x24, 0x1a, 0x85, 0xc2, 0xb8, 0x5e, 0xc6, 0x3c, 0x23, 0x25, 0x54, 0x7f, 0x20, + 0x3f, 0x82, 0xe2, 0x29, 0x13, 0xf8, 0x4c, 0x92, 0x71, 0xb2, 0xd9, 0xa9, 0xa0, 0xce, 0x29, 0x13, + 0x58, 0xf5, 0xa9, 0xf9, 0x86, 0xad, 0x44, 0x6c, 0x5a, 0x89, 0xfc, 0xa2, 0x56, 0xc2, 0x7c, 0x25, + 0x3b, 0x50, 0xe9, 0x45, 0x61, 0x22, 0xb8, 0x17, 0xe0, 0xc2, 0x05, 0xa9, 0xfc, 0x7f, 0xa8, 0xac, + 0xf6, 0xb3, 0x37, 0xfd, 0x48, 0xd3, 0x9a, 0xce, 0x3b, 0xf0, 0xf6, 0xec, 0x2e, 0x75, 0x1f, 0xf7, + 0x08, 0xfe, 0x9f, 0xb2, 0x21, 0xf3, 0x12, 0xb6, 0x3a, 0x03, 0x8e, 0x0d, 0x8d, 0xab, 0x60, 0x6d, + 0xf8, 0xaf, 0x39, 0xa8, 0x1c, 0xbc, 0x66, 0xbd, 0x13, 0x96, 0x24, 0xde, 0x80, 0x91, 0x77, 0xa1, + 0x7c, 0xc6, 0xa3, 0x1e, 0x4b, 0x92, 0x89, 0xad, 0xa9, 0x80, 0x7c, 0x06, 0xf9, 0xa3, 0x30, 0x10, + 0x3a, 0x63, 0x6f, 0x65, 0xf6, 0x88, 0x81, 0xd0, 0x36, 0xf1, 0x7d, 0x84, 0x53, 0xb2, 0x0b, 0x79, + 0x8c, 0xf7, 0x9b, 0xe4, 0x1c, 0x3f, 0x85, 0x45, 0x0c, 0xe9, 0xca, 0x17, 0x65, 0xf0, 0x2d, 0xd3, + 0xcc, 0xb7, 0xb2, 0x93, 0x65, 0xf0, 0x2d, 0x9b, 0x5a, 0xd0, 0x48, 0x72, 0x00, 0xc5, 0x67, 0xc2, + 0xe3, 0xd8, 0x56, 0xa8, 0x13, 0xb9, 0x97, 0x55, 0x37, 0x95, 0xe6, 0xd4, 0x8a, 0xc1, 0x22, 0x09, + 0x07, 0xaf, 0x03, 0x21, 0x9b, 0x86, 0x6c, 0x12, 0x50, 0x2d, 0xb5, 0x11, 0x9c, 0x22, 0x7a, 0x3f, + 0x0a, 0x59, 0xa3, 0xb8, 0x14, 0x8d, 0x6a, 0x29, 0x34, 0x4e, 0xbb, 0x45, 0x28, 0xc8, 0xa2, 0xea, + 0xfc, 0xc9, 0x82, 0x4a, 0x8a, 0xe3, 0x1b, 0xdc, 0x83, 0x77, 0x21, 0x8f, 0x0f, 0x4a, 0x7d, 0x76, + 0x25, 0x79, 0x0b, 0x98, 0xf0, 0xa8, 0x94, 0x62, 0xd6, 0x3a, 0xf4, 0xd5, 0xdd, 0xac, 0x51, 0x1c, + 0xa2, 0xe4, 0x2b, 0x31, 0x96, 0x74, 0x97, 0x28, 0x0e, 0xc9, 0x36, 0x94, 0x9e, 0xb1, 0xde, 0x88, + 0x07, 0x62, 0x2c, 0x09, 0xdc, 0xec, 0xd4, 0xd1, 0x8a, 0x91, 0xc9, 0xcb, 0x32, 0xd1, 0x70, 0xbe, + 0xc4, 0xc0, 0x9a, 0x3a, 0x48, 0x20, 0xbf, 0x87, 0x6d, 0x35, 0x7a, 0x56, 0xa3, 0x72, 0x8c, 0x2f, + 0x9b, 0x83, 0x65, 0x2f, 0x9b, 0x03, 0xf3, 0xb2, 0x99, 0x3d, 0x10, 0x4c, 0x82, 0x29, 0x82, 0x9c, + 0xc7, 0x50, 0x9e, 0x04, 0x0d, 0x3e, 0x2a, 0x0f, 0x7d, 0xbd, 0xd2, 0xfa, 0xa1, 0x8f, 0x5b, 0x39, + 0x78, 0x7a, 0x28, 0x57, 0x29, 0x51, 0x1c, 0x4e, 0x4a, 0x4e, 0x2e, 0x55, 0x72, 0x76, 0xf0, 0xcd, + 0x96, 0x8a, 0x1c, 0x54, 0xa2, 0xd1, 0x65, 0x62, 0x5c, 0xc6, 0xb1, 0xda, 0xc6, 0x30, 0x91, 0xb6, + 0xe4, 0x36, 0x86, 0x49, 0xe7, 0x3f, 0x25, 0x28, 0x1f, 0x1f, 0x77, 0xbb, 0x3c, 0xf0, 0x07, 0x8c, + 0xfc, 0xce, 0x02, 0x72, 0xf5, 0x29, 0x40, 0x3e, 0xc9, 0x0e, 0xd8, 0xc5, 0xef, 0x19, 0xfb, 0xd3, + 0x15, 0x51, 0xba, 0x02, 0x7c, 0x0d, 0x05, 0xd9, 0x7d, 0x90, 0x1f, 0xdf, 0xb0, 0x33, 0xb4, 0x5b, + 0xcb, 0x15, 0xb5, 0xed, 0x1e, 0x94, 0x4c, 0x05, 0x27, 0xf7, 0x33, 0xdd, 0x9b, 0x69, 0x50, 0xec, + 0x0f, 0x6f, 0xa4, 0xab, 0x17, 0xf9, 0x35, 0x14, 0x75, 0x61, 0x26, 0xf7, 0x96, 0xe0, 0xa6, 0x2d, + 0x82, 0x7d, 0xff, 0x26, 0xaa, 0xd3, 0x6d, 0x98, 0x02, 0x9c, 0xb9, 0x8d, 0xb9, 0xf2, 
0x9e, 0xb9, + 0x8d, 0x2b, 0x15, 0xfd, 0x39, 0xe4, 0xb1, 0x52, 0x93, 0xac, 0x6b, 0x9e, 0x2a, 0xe5, 0x76, 0xd6, + 0x71, 0xcd, 0x94, 0xf8, 0x5f, 0x61, 0x3a, 0x94, 0x2f, 0x9a, 0xec, 0x44, 0x98, 0xfa, 0x0b, 0xc2, + 0xbe, 0x77, 0x03, 0xcd, 0xa9, 0x79, 0xd5, 0xbb, 0x67, 0x9a, 0x9f, 0xf9, 0x67, 0x21, 0xd3, 0xfc, + 0xdc, 0x3f, 0x0e, 0x11, 0x54, 0xd3, 0x55, 0x8e, 0xb8, 0x19, 0xd0, 0x05, 0x45, 0xdf, 0x6e, 0xdf, + 0x58, 0x5f, 0x2f, 0xf8, 0x1d, 0x76, 0x9d, 0xb3, 0x15, 0x90, 0x74, 0x32, 0xe9, 0x58, 0x58, 0x6b, + 0xed, 0x07, 0x2b, 0x61, 0xf4, 0xe2, 0x9e, 0xaa, 0xb0, 0xba, 0x8a, 0x92, 0xec, 0x82, 0x31, 0xa9, + 0xc4, 0xf6, 0x0d, 0xf5, 0x5a, 0xd6, 0xc7, 0x56, 0xb7, 0xfa, 0xfd, 0x9b, 0xbb, 0xd6, 0xdf, 0xdf, + 0xdc, 0xb5, 0xfe, 0xf5, 0xe6, 0xae, 0x75, 0xbe, 0x21, 0xff, 0x85, 0x7d, 0xf0, 0xdf, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x8e, 0x01, 0x01, 0xc8, 0xd7, 0x16, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// LLBBridgeClient is the client API for LLBBridge service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type LLBBridgeClient interface { + // apicaps:CapResolveImage + ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) + // apicaps:CapSolveBase + Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) + // apicaps:CapReadFile + ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) + // apicaps:CapReadDir + ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) + // apicaps:CapStatFile + StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) + Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) + Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) + // apicaps:CapFrontendInputs + Inputs(ctx context.Context, in *InputsRequest, opts ...grpc.CallOption) (*InputsResponse, error) + NewContainer(ctx context.Context, in *NewContainerRequest, opts ...grpc.CallOption) (*NewContainerResponse, error) + ReleaseContainer(ctx context.Context, in *ReleaseContainerRequest, opts ...grpc.CallOption) (*ReleaseContainerResponse, error) + ExecProcess(ctx context.Context, opts ...grpc.CallOption) (LLBBridge_ExecProcessClient, error) +} + +type lLBBridgeClient struct { + cc *grpc.ClientConn +} + +func NewLLBBridgeClient(cc *grpc.ClientConn) LLBBridgeClient { + return &lLBBridgeClient{cc} +} + +func (c *lLBBridgeClient) ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) { + out := new(ResolveImageConfigResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", in, out, opts...) 
if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *ReturnRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *lLBBridgeClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { + out := new(SolveResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Solve", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *ReturnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Error != nil { - { - size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 +func (c *lLBBridgeClient) ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) { + out := new(ReadFileResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", in, out, opts...) + if err != nil { + return nil, err } - if m.Result != nil { - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + return out, nil +} + +func (c *lLBBridgeClient) ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) { + out := new(ReadDirResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", in, out, opts...) + if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *lLBBridgeClient) StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) { + out := new(StatFileResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/StatFile", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *ReturnResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *lLBBridgeClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) { + out := new(PongResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Ping", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *ReturnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (c *lLBBridgeClient) Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) { + out := new(ReturnResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Return", in, out, opts...) 
+ if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *InputsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *lLBBridgeClient) Inputs(ctx context.Context, in *InputsRequest, opts ...grpc.CallOption) (*InputsResponse, error) { + out := new(InputsResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Inputs", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *InputsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *lLBBridgeClient) NewContainer(ctx context.Context, in *NewContainerRequest, opts ...grpc.CallOption) (*NewContainerResponse, error) { + out := new(NewContainerResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/NewContainer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *InputsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (c *lLBBridgeClient) ReleaseContainer(ctx context.Context, in *ReleaseContainerRequest, opts ...grpc.CallOption) (*ReleaseContainerResponse, error) { + out := new(ReleaseContainerResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReleaseContainer", in, out, opts...) + if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *InputsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *lLBBridgeClient) ExecProcess(ctx context.Context, opts ...grpc.CallOption) (LLBBridge_ExecProcessClient, error) { + stream, err := c.cc.NewStream(ctx, &_LLBBridge_serviceDesc.Streams[0], "/moby.buildkit.v1.frontend.LLBBridge/ExecProcess", opts...) 
if err != nil { return nil, err } - return dAtA[:n], nil + x := &lLBBridgeExecProcessClient{stream} + return x, nil } -func (m *InputsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type LLBBridge_ExecProcessClient interface { + Send(*ExecMessage) error + Recv() (*ExecMessage, error) + grpc.ClientStream } -func (m *InputsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Definitions) > 0 { - for k := range m.Definitions { - v := m.Definitions[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil +type lLBBridgeExecProcessClient struct { + grpc.ClientStream } -func (m *ResolveImageConfigRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { +func (x *lLBBridgeExecProcessClient) Send(m *ExecMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *lLBBridgeExecProcessClient) Recv() (*ExecMessage, error) { + m := new(ExecMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } - return dAtA[:n], nil + return m, nil } -func (m *ResolveImageConfigRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// LLBBridgeServer is the server API for LLBBridge service. +type LLBBridgeServer interface { + // apicaps:CapResolveImage + ResolveImageConfig(context.Context, *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error) + // apicaps:CapSolveBase + Solve(context.Context, *SolveRequest) (*SolveResponse, error) + // apicaps:CapReadFile + ReadFile(context.Context, *ReadFileRequest) (*ReadFileResponse, error) + // apicaps:CapReadDir + ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error) + // apicaps:CapStatFile + StatFile(context.Context, *StatFileRequest) (*StatFileResponse, error) + Ping(context.Context, *PingRequest) (*PongResponse, error) + Return(context.Context, *ReturnRequest) (*ReturnResponse, error) + // apicaps:CapFrontendInputs + Inputs(context.Context, *InputsRequest) (*InputsResponse, error) + NewContainer(context.Context, *NewContainerRequest) (*NewContainerResponse, error) + ReleaseContainer(context.Context, *ReleaseContainerRequest) (*ReleaseContainerResponse, error) + ExecProcess(LLBBridge_ExecProcessServer) error } -func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.LogName) > 0 { - i -= len(m.LogName) - copy(dAtA[i:], m.LogName) - i = encodeVarintGateway(dAtA, i, uint64(len(m.LogName))) - i-- - dAtA[i] = 0x22 +// UnimplementedLLBBridgeServer can be embedded to have forward compatible implementations. 
+type UnimplementedLLBBridgeServer struct { +} + +func (*UnimplementedLLBBridgeServer) ResolveImageConfig(ctx context.Context, req *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResolveImageConfig not implemented") +} +func (*UnimplementedLLBBridgeServer) Solve(ctx context.Context, req *SolveRequest) (*SolveResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Solve not implemented") +} +func (*UnimplementedLLBBridgeServer) ReadFile(ctx context.Context, req *ReadFileRequest) (*ReadFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadFile not implemented") +} +func (*UnimplementedLLBBridgeServer) ReadDir(ctx context.Context, req *ReadDirRequest) (*ReadDirResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDir not implemented") +} +func (*UnimplementedLLBBridgeServer) StatFile(ctx context.Context, req *StatFileRequest) (*StatFileResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StatFile not implemented") +} +func (*UnimplementedLLBBridgeServer) Ping(ctx context.Context, req *PingRequest) (*PongResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") +} +func (*UnimplementedLLBBridgeServer) Return(ctx context.Context, req *ReturnRequest) (*ReturnResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Return not implemented") +} +func (*UnimplementedLLBBridgeServer) Inputs(ctx context.Context, req *InputsRequest) (*InputsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Inputs not implemented") +} +func (*UnimplementedLLBBridgeServer) NewContainer(ctx context.Context, req *NewContainerRequest) (*NewContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NewContainer not implemented") +} +func (*UnimplementedLLBBridgeServer) ReleaseContainer(ctx context.Context, req *ReleaseContainerRequest) (*ReleaseContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReleaseContainer not implemented") +} +func (*UnimplementedLLBBridgeServer) ExecProcess(srv LLBBridge_ExecProcessServer) error { + return status.Errorf(codes.Unimplemented, "method ExecProcess not implemented") +} + +func RegisterLLBBridgeServer(s *grpc.Server, srv LLBBridgeServer) { + s.RegisterService(&_LLBBridge_serviceDesc, srv) +} + +func _LLBBridge_ResolveImageConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResolveImageConfigRequest) + if err := dec(in); err != nil { + return nil, err } - if len(m.ResolveMode) > 0 { - i -= len(m.ResolveMode) - copy(dAtA[i:], m.ResolveMode) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ResolveMode))) - i-- - dAtA[i] = 0x1a + if interceptor == nil { + return srv.(LLBBridgeServer).ResolveImageConfig(ctx, in) } - if m.Platform != nil { - { - size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(LLBBridgeServer).ResolveImageConfig(ctx, req.(*ResolveImageConfigRequest)) } - return len(dAtA) - i, nil + return interceptor(ctx, in, info, handler) } -func (m *ResolveImageConfigResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { +func _LLBBridge_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SolveRequest) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return srv.(LLBBridgeServer).Solve(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Solve", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Solve(ctx, req.(*SolveRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *ResolveImageConfigResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func _LLBBridge_ReadFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadFileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).ReadFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).ReadFile(ctx, req.(*ReadFileRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *ResolveImageConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func _LLBBridge_ReadDir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDirRequest) + if err := dec(in); err != nil { + return nil, err } - if len(m.Config) > 0 { - i -= len(m.Config) - copy(dAtA[i:], m.Config) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Config))) - i-- - dAtA[i] = 0x12 + if interceptor == nil { + return srv.(LLBBridgeServer).ReadDir(ctx, in) } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", } - return len(dAtA) - i, nil + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).ReadDir(ctx, req.(*ReadDirRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *SolveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { +func _LLBBridge_StatFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatFileRequest) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return srv.(LLBBridgeServer).StatFile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/moby.buildkit.v1.frontend.LLBBridge/StatFile", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).StatFile(ctx, req.(*StatFileRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func _LLBBridge_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Ping(ctx, req.(*PingRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func _LLBBridge_Return_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReturnRequest) + if err := dec(in); err != nil { + return nil, err } - if len(m.FrontendInputs) > 0 { - for k := range m.FrontendInputs { - v := m.FrontendInputs[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x6a - } + if interceptor == nil { + return srv.(LLBBridgeServer).Return(ctx, in) } - if len(m.CacheImports) > 0 { - for iNdEx := len(m.CacheImports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.CacheImports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Return", } - if len(m.ExporterAttr) > 0 { - i -= len(m.ExporterAttr) - copy(dAtA[i:], m.ExporterAttr) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ExporterAttr))) - i-- - dAtA[i] = 0x5a + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Return(ctx, req.(*ReturnRequest)) } - if m.Final { - i-- - if m.Final { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_Inputs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InputsRequest) + if err := dec(in); err != nil { + return nil, err } - if m.AllowResultArrayRef { - i-- - if m.AllowResultArrayRef { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 + if interceptor == nil { + return srv.(LLBBridgeServer).Inputs(ctx, in) } - if m.AllowResultReturn { - i-- - if m.AllowResultReturn { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/moby.buildkit.v1.frontend.LLBBridge/Inputs", } - if len(m.ImportCacheRefsDeprecated) > 0 { - for iNdEx := len(m.ImportCacheRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ImportCacheRefsDeprecated[iNdEx]) - copy(dAtA[i:], m.ImportCacheRefsDeprecated[iNdEx]) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ImportCacheRefsDeprecated[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Inputs(ctx, req.(*InputsRequest)) } - if len(m.FrontendOpt) > 0 { - for k := range m.FrontendOpt { - v := m.FrontendOpt[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_NewContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NewContainerRequest) + if err := dec(in); err != nil { + return nil, err } - if len(m.Frontend) > 0 { - i -= len(m.Frontend) - copy(dAtA[i:], m.Frontend) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Frontend))) - i-- - dAtA[i] = 0x12 + if interceptor == nil { + return srv.(LLBBridgeServer).NewContainer(ctx, in) } - if m.Definition != nil { - { - size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/NewContainer", } - return len(dAtA) - i, nil + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).NewContainer(ctx, req.(*NewContainerRequest)) + } + return interceptor(ctx, in, info, handler) } -func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { +func _LLBBridge_ReleaseContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReleaseContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).ReleaseContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReleaseContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).ReleaseContainer(ctx, req.(*ReleaseContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _LLBBridge_ExecProcess_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LLBBridgeServer).ExecProcess(&lLBBridgeExecProcessServer{stream}) +} + +type LLBBridge_ExecProcessServer interface { + Send(*ExecMessage) error + Recv() (*ExecMessage, error) + grpc.ServerStream +} + +type lLBBridgeExecProcessServer struct { + grpc.ServerStream +} + +func (x *lLBBridgeExecProcessServer) Send(m *ExecMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *lLBBridgeExecProcessServer) Recv() (*ExecMessage, error) { + m := new(ExecMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LLBBridge_serviceDesc = grpc.ServiceDesc{ + ServiceName: 
"moby.buildkit.v1.frontend.LLBBridge", + HandlerType: (*LLBBridgeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ResolveImageConfig", + Handler: _LLBBridge_ResolveImageConfig_Handler, + }, + { + MethodName: "Solve", + Handler: _LLBBridge_Solve_Handler, + }, + { + MethodName: "ReadFile", + Handler: _LLBBridge_ReadFile_Handler, + }, + { + MethodName: "ReadDir", + Handler: _LLBBridge_ReadDir_Handler, + }, + { + MethodName: "StatFile", + Handler: _LLBBridge_StatFile_Handler, + }, + { + MethodName: "Ping", + Handler: _LLBBridge_Ping_Handler, + }, + { + MethodName: "Return", + Handler: _LLBBridge_Return_Handler, + }, + { + MethodName: "Inputs", + Handler: _LLBBridge_Inputs_Handler, + }, + { + MethodName: "NewContainer", + Handler: _LLBBridge_NewContainer_Handler, + }, + { + MethodName: "ReleaseContainer", + Handler: _LLBBridge_ReleaseContainer_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ExecProcess", + Handler: _LLBBridge_ExecProcess_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "gateway.proto", +} + +func (m *Result) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2493,12 +2668,12 @@ func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) { +func (m *Result) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Result) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2507,15 +2682,17 @@ func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Attrs) > 0 { - for k := range m.Attrs { - v := m.Attrs[k] + if len(m.Metadata) > 0 { + for k := range m.Metadata { + v := m.Metadata[k] baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 + if len(v) > 0 { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } i -= len(k) copy(dAtA[i:], k) i = encodeVarintGateway(dAtA, i, uint64(len(k))) @@ -2523,20 +2700,99 @@ func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x52 } } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Type))) + if m.Result != nil { + { + size := m.Result.Size() + i -= size + if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Result_RefDeprecated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Result_RefDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.RefDeprecated) + copy(dAtA[i:], m.RefDeprecated) + i = encodeVarintGateway(dAtA, i, uint64(len(m.RefDeprecated))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *Result_RefsDeprecated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Result_RefsDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.RefsDeprecated != nil { + { + size, 
err := m.RefsDeprecated.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 } return len(dAtA) - i, nil } +func (m *Result_Ref) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} -func (m *SolveResponse) Marshal() (dAtA []byte, err error) { +func (m *Result_Ref) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Ref != nil { + { + size, err := m.Ref.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *Result_Refs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Result_Refs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Refs != nil { + { + size, err := m.Refs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *RefMapDeprecated) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2546,12 +2802,12 @@ func (m *SolveResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *RefMapDeprecated) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RefMapDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2560,29 +2816,29 @@ func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.Result != nil { - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) + if len(m.Refs) > 0 { + for k := range m.Refs { + v := m.Refs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0x1a - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReadFileRequest) Marshal() (dAtA []byte, err error) { +func (m *Ref) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2592,12 +2848,12 @@ func (m *ReadFileRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReadFileRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *Ref) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReadFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Ref) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2606,9 +2862,9 @@ func (m *ReadFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], 
m.XXX_unrecognized) } - if m.Range != nil { + if m.Def != nil { { - size, err := m.Range.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Def.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2616,26 +2872,19 @@ func (m *ReadFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGateway(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - } - if len(m.FilePath) > 0 { - i -= len(m.FilePath) - copy(dAtA[i:], m.FilePath) - i = encodeVarintGateway(dAtA, i, uint64(len(m.FilePath))) - i-- dAtA[i] = 0x12 } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Id))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *FileRange) Marshal() (dAtA []byte, err error) { +func (m *RefMap) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2645,12 +2894,12 @@ func (m *FileRange) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *FileRange) MarshalTo(dAtA []byte) (int, error) { +func (m *RefMap) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *FileRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RefMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2659,20 +2908,36 @@ func (m *FileRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.Length != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Length)) - i-- - dAtA[i] = 0x10 - } - if m.Offset != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x8 + if len(m.Refs) > 0 { + for k := range m.Refs { + v := m.Refs[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *ReadFileResponse) Marshal() (dAtA []byte, err error) { +func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2682,12 +2947,12 @@ func (m *ReadFileResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReadFileResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *ReturnRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReadFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReturnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2696,17 +2961,34 @@ func (m *ReadFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Data))) + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Result != nil { + { + size, 
err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ReadDirRequest) Marshal() (dAtA []byte, err error) { +func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2716,12 +2998,12 @@ func (m *ReadDirRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReadDirRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *ReturnResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReadDirRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ReturnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2730,31 +3012,10 @@ func (m *ReadDirRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.IncludePattern) > 0 { - i -= len(m.IncludePattern) - copy(dAtA[i:], m.IncludePattern) - i = encodeVarintGateway(dAtA, i, uint64(len(m.IncludePattern))) - i-- - dAtA[i] = 0x1a - } - if len(m.DirPath) > 0 { - i -= len(m.DirPath) - copy(dAtA[i:], m.DirPath) - i = encodeVarintGateway(dAtA, i, uint64(len(m.DirPath))) - i-- - dAtA[i] = 0x12 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } return len(dAtA) - i, nil } -func (m *ReadDirResponse) Marshal() (dAtA []byte, err error) { +func (m *InputsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2764,12 +3025,12 @@ func (m *ReadDirResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ReadDirResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *InputsRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReadDirResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InputsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2778,24 +3039,10 @@ func (m *ReadDirResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Entries) > 0 { - for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } -func (m *StatFileRequest) Marshal() (dAtA []byte, err error) { +func (m *InputsResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2805,12 +3052,12 @@ func (m *StatFileRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StatFileRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *InputsResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StatFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InputsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2819,24 +3066,36 @@ func (m *StatFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i 
-= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa + if len(m.Definitions) > 0 { + for k := range m.Definitions { + v := m.Definitions[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *StatFileResponse) Marshal() (dAtA []byte, err error) { +func (m *ResolveImageConfigRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2846,12 +3105,12 @@ func (m *StatFileResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StatFileResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *ResolveImageConfigRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StatFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2860,9 +3119,23 @@ func (m *StatFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if m.Stat != nil { + if len(m.LogName) > 0 { + i -= len(m.LogName) + copy(dAtA[i:], m.LogName) + i = encodeVarintGateway(dAtA, i, uint64(len(m.LogName))) + i-- + dAtA[i] = 0x22 + } + if len(m.ResolveMode) > 0 { + i -= len(m.ResolveMode) + copy(dAtA[i:], m.ResolveMode) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ResolveMode))) + i-- + dAtA[i] = 0x1a + } + if m.Platform != nil { { - size, err := m.Stat.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2870,12 +3143,19 @@ func (m *StatFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGateway(dAtA, i, uint64(size)) } i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *PingRequest) Marshal() (dAtA []byte, err error) { +func (m *ResolveImageConfigResponse) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2885,12 +3165,12 @@ func (m *PingRequest) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { +func (m *ResolveImageConfigResponse) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ResolveImageConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2899,10 +3179,24 @@ func (m *PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Config) > 0 { + i -= 
len(m.Config) + copy(dAtA[i:], m.Config) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Config))) + i-- + dAtA[i] = 0x12 + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } -func (m *PongResponse) Marshal() (dAtA []byte, err error) { +func (m *SolveRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2912,12 +3206,12 @@ func (m *PongResponse) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PongResponse) MarshalTo(dAtA []byte) (int, error) { +func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PongResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -2926,24 +3220,36 @@ func (m *PongResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Workers) > 0 { - for iNdEx := len(m.Workers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Workers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.FrontendInputs) > 0 { + for k := range m.FrontendInputs { + v := m.FrontendInputs[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a } } - if len(m.LLBCaps) > 0 { - for iNdEx := len(m.LLBCaps) - 1; iNdEx >= 0; iNdEx-- { + if len(m.CacheImports) > 0 { + for iNdEx := len(m.CacheImports) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.LLBCaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.CacheImports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2951,587 +3257,3244 @@ func (m *PongResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGateway(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x62 } } - if len(m.FrontendAPICaps) > 0 { - for iNdEx := len(m.FrontendAPICaps) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.FrontendAPICaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } + if len(m.ExporterAttr) > 0 { + i -= len(m.ExporterAttr) + copy(dAtA[i:], m.ExporterAttr) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ExporterAttr))) + i-- + dAtA[i] = 0x5a + } + if m.Final { + i-- + if m.Final { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.AllowResultArrayRef { + i-- + if m.AllowResultArrayRef { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.AllowResultReturn { + i-- + if m.AllowResultReturn { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ImportCacheRefsDeprecated) > 0 { + for iNdEx := len(m.ImportCacheRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ImportCacheRefsDeprecated[iNdEx]) + copy(dAtA[i:], m.ImportCacheRefsDeprecated[iNdEx]) + i = encodeVarintGateway(dAtA, i, 
uint64(len(m.ImportCacheRefsDeprecated[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.FrontendOpt) > 0 { + for k := range m.FrontendOpt { + v := m.FrontendOpt[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) i-- dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Frontend) > 0 { + i -= len(m.Frontend) + copy(dAtA[i:], m.Frontend) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Frontend))) + i-- + dAtA[i] = 0x12 + } + if m.Definition != nil { + { + size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func encodeVarintGateway(dAtA []byte, offset int, v uint64) int { - offset -= sovGateway(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *Result) Size() (n int) { - if m == nil { - return 0 - } + +func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Result != nil { - n += m.Result.Size() + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - l = 0 - if len(v) > 0 { - l = 1 + len(v) + sovGateway(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + if len(m.Attrs) > 0 { + for k := range m.Attrs { + v := m.Attrs[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGateway(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintGateway(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *Result_RefDeprecated) Size() (n int) { - if m == nil { - return 0 +func (m *SolveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = len(m.RefDeprecated) - n += 1 + l + sovGateway(uint64(l)) - return n + return dAtA[:n], nil } -func (m *Result_RefsDeprecated) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RefsDeprecated != nil { - l = m.RefsDeprecated.Size() - n += 1 + l + sovGateway(uint64(l)) - } - return n + +func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Result_Ref) Size() (n int) { - if m == nil { - return 0 - } + +func (m *SolveResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Ref != nil { - l = m.Ref.Size() - n += 1 + l + sovGateway(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n -} -func (m *Result_Refs) Size() (n int) { - if m == nil { - return 0 + if m.Result != nil { + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - var l int - _ = l - if m.Refs != nil { - l = m.Refs.Size() - n += 1 + l + sovGateway(uint64(l)) + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *RefMapDeprecated) Size() (n int) { - if m == nil { - return 0 + +func (m *ReadFileRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ReadFileRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Refs) > 0 { - for k, v := range m.Refs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Range != nil { + { + size, err := m.Range.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if len(m.FilePath) > 0 { + i -= len(m.FilePath) + copy(dAtA[i:], m.FilePath) + i = encodeVarintGateway(dAtA, i, uint64(len(m.FilePath))) + i-- + dAtA[i] = 0x12 } - return n + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *Ref) Size() (n int) { - if m == nil { - return 0 +func (m *FileRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *FileRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FileRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if m.Def != nil { - l = m.Def.Size() - n += 1 + l + sovGateway(uint64(l)) + if m.Length != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x10 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Offset != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Offset)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *RefMap) Size() (n int) { - if m == nil { - return 0 +func (m *ReadFileResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() 
+ dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ReadFileResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Refs) > 0 { - for k, v := range m.Refs { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovGateway(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ReturnRequest) Size() (n int) { - if m == nil { - return 0 +func (m *ReadDirRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ReadDirRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadDirRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGateway(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if m.Error != nil { - l = m.Error.Size() - n += 1 + l + sovGateway(uint64(l)) + if len(m.IncludePattern) > 0 { + i -= len(m.IncludePattern) + copy(dAtA[i:], m.IncludePattern) + i = encodeVarintGateway(dAtA, i, uint64(len(m.IncludePattern))) + i-- + dAtA[i] = 0x1a } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if len(m.DirPath) > 0 { + i -= len(m.DirPath) + copy(dAtA[i:], m.DirPath) + i = encodeVarintGateway(dAtA, i, uint64(len(m.DirPath))) + i-- + dAtA[i] = 0x12 } - return n + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ReturnResponse) Size() (n int) { - if m == nil { - return 0 +func (m *ReadDirResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ReadDirResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReadDirResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + if len(m.Entries) > 0 { + for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil } -func (m *InputsRequest) Size() (n int) { - if m == nil { - return 0 +func (m *StatFileRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *StatFileRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + } + if len(m.Ref) > 0 { + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *InputsResponse) Size() (n int) { - if m == nil { - return 0 +func (m *StatFileResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *StatFileResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Definitions) > 0 { - for k, v := range m.Definitions { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovGateway(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Stat != nil { + { + size, err := m.Stat.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return len(dAtA) - i, nil } -func (m *ResolveImageConfigRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.Platform != nil { - l = m.Platform.Size() - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.ResolveMode) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.LogName) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (m *PingRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ResolveImageConfigResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.Config) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + return len(dAtA) - i, nil } -func (m *SolveRequest) Size() (n int) { - if m == nil { - return 0 +func (m *PongResponse) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *PongResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PongResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Definition != nil { - l = m.Definition.Size() - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.Frontend) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if len(m.FrontendOpt) > 0 { - for k, v := range m.FrontendOpt { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + if len(m.Workers) > 0 { + for iNdEx := len(m.Workers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Workers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - if len(m.ImportCacheRefsDeprecated) > 0 { - for _, s := range m.ImportCacheRefsDeprecated { - l = len(s) - n += 1 + l + sovGateway(uint64(l)) + if len(m.LLBCaps) > 0 { + for iNdEx := len(m.LLBCaps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LLBCaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.AllowResultReturn { - n += 2 + if len(m.FrontendAPICaps) > 0 { + for iNdEx := len(m.FrontendAPICaps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FrontendAPICaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } } - if m.AllowResultArrayRef { - n += 2 + return len(dAtA) - i, nil +} + +func (m *NewContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if m.Final { - n += 2 + return dAtA[:n], nil +} + +func (m *NewContainerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NewContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - l = len(m.ExporterAttr) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) + if m.Constraints != nil { + { + size, err := m.Constraints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - if len(m.CacheImports) > 0 { - for _, e := range m.CacheImports { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) + if m.Platform != nil { + { + size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if len(m.FrontendInputs) > 0 { - for k, v := range m.FrontendInputs { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovGateway(uint64(l)) + if m.Network != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Network)) + 
i-- + dAtA[i] = 0x18 + } + if len(m.Mounts) > 0 { + for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + i-- + dAtA[i] = 0x12 } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if len(m.ContainerID) > 0 { + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *CacheOptionsEntry) Size() (n int) { - if m == nil { - return 0 +func (m *NewContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *NewContainerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NewContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if len(m.Attrs) > 0 { - for k, v := range m.Attrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + return len(dAtA) - i, nil } -func (m *SolveResponse) Size() (n int) { - if m == nil { - return 0 +func (m *ReleaseContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ReleaseContainerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReleaseContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGateway(uint64(l)) - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + if len(m.ContainerID) > 0 { + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *ReadFileRequest) Size() (n int) { - if m == nil { - return 0 +func (m *ReleaseContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ReleaseContainerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReleaseContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.FilePath) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if 
m.Range != nil { - l = m.Range.Size() - n += 1 + l + sovGateway(uint64(l)) - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n + return len(dAtA) - i, nil } -func (m *FileRange) Size() (n int) { - if m == nil { - return 0 +func (m *ExecMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ExecMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Offset != 0 { - n += 1 + sovGateway(uint64(m.Offset)) + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - if m.Length != 0 { - n += 1 + sovGateway(uint64(m.Length)) + if m.Input != nil { + { + size := m.Input.Size() + i -= size + if _, err := m.Input.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if len(m.ProcessID) > 0 { + i -= len(m.ProcessID) + copy(dAtA[i:], m.ProcessID) + i = encodeVarintGateway(dAtA, i, uint64(len(m.ProcessID))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ReadFileResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (m *ExecMessage_Init) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReadDirRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) +func (m *ExecMessage_Init) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Init != nil { + { + size, err := m.Init.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - l = len(m.DirPath) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) + return len(dAtA) - i, nil +} +func (m *ExecMessage_File) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecMessage_File) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.File != nil { + { + size, err := m.File.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - l = len(m.IncludePattern) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) + return len(dAtA) - i, nil +} +func (m *ExecMessage_Resize) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecMessage_Resize) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Resize != nil { + { + size, err := m.Resize.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + return len(dAtA) - i, nil +} +func (m *ExecMessage_Started) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecMessage_Started) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Started != nil { + { + size, err := m.Started.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - return n + return len(dAtA) - i, nil +} +func (m *ExecMessage_Exit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ReadDirResponse) Size() (n int) { - if m == nil { - return 0 +func (m *ExecMessage_Exit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Exit != nil { + { + size, err := m.Exit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - var l int - _ = l - if len(m.Entries) > 0 { - for _, e := range m.Entries { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) + return len(dAtA) - i, nil +} +func (m *ExecMessage_Done) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecMessage_Done) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Done != nil { + { + size, err := m.Done.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x3a } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + return len(dAtA) - i, nil +} +func (m *InitMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *StatFileRequest) Size() (n int) { - if m == nil { - return 0 - } +func (m *InitMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *InitMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.Path) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - return n -} - -func (m *StatFileResponse) Size() (n int) { - if m == nil { - return 0 + if m.Security != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Security)) + i-- + dAtA[i] = 0x28 } - var l int - _ = l - if m.Stat != nil { - l = m.Stat.Size() - n += 1 + l + sovGateway(uint64(l)) + if m.Tty { + i-- + if m.Tty { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Fds) > 0 { + dAtA24 := make([]byte, len(m.Fds)*10) + var j23 int + for _, num := range m.Fds { + for num >= 1<<7 { + dAtA24[j23] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j23++ + } + dAtA24[j23] = uint8(num) + j23++ + } + i -= j23 + copy(dAtA[i:], dAtA24[:j23]) + i = encodeVarintGateway(dAtA, i, uint64(j23)) + i-- + dAtA[i] = 0x1a } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + if m.Meta != nil { + { + size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return n + if len(m.ContainerID) > 0 { + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGateway(dAtA, i, 
uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *PingRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (m *ExitMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *PongResponse) Size() (n int) { - if m == nil { - return 0 - } +func (m *ExitMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExitMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.FrontendAPICaps) > 0 { - for _, e := range m.FrontendAPICaps { + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Error != nil { + { + size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StartedMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartedMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartedMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *DoneMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoneMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DoneMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *FdMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FdMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FdMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + if m.EOF { + i-- + if m.EOF { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Fd != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Fd)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResizeMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResizeMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResizeMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Cols != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Cols)) + i-- + dAtA[i] = 0x10 + } + if m.Rows != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Rows)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintGateway(dAtA []byte, offset int, v uint64) int { + offset -= sovGateway(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Result) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + n += m.Result.Size() + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + l = 0 + if len(v) > 0 { + l = 1 + len(v) + sovGateway(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Result_RefDeprecated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RefDeprecated) + n += 1 + l + sovGateway(uint64(l)) + return n +} +func (m *Result_RefsDeprecated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RefsDeprecated != nil { + l = m.RefsDeprecated.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *Result_Ref) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Ref != nil { + l = m.Ref.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *Result_Refs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Refs != nil { + l = m.Refs.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *RefMapDeprecated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Refs) > 0 { + for k, v := range m.Refs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Ref) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Def != nil { + l = m.Def.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RefMap) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Refs) > 0 { + for k, v := range m.Refs { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGateway(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReturnRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + 
sovGateway(uint64(l)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReturnResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InputsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *InputsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Definitions) > 0 { + for k, v := range m.Definitions { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGateway(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResolveImageConfigRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Platform != nil { + l = m.Platform.Size() + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.ResolveMode) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.LogName) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResolveImageConfigResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Config) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SolveRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Definition != nil { + l = m.Definition.Size() + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Frontend) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.FrontendOpt) > 0 { + for k, v := range m.FrontendOpt { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if len(m.ImportCacheRefsDeprecated) > 0 { + for _, s := range m.ImportCacheRefsDeprecated { + l = len(s) + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.AllowResultReturn { + n += 2 + } + if m.AllowResultArrayRef { + n += 2 + } + if m.Final { + n += 2 + } + l = len(m.ExporterAttr) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.CacheImports) > 0 { + for _, e := range m.CacheImports { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if len(m.FrontendInputs) > 0 { + for k, v := range m.FrontendInputs { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovGateway(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *CacheOptionsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) + 
n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SolveResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadFileRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.FilePath) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Range != nil { + l = m.Range.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FileRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sovGateway(uint64(m.Offset)) + } + if m.Length != 0 { + n += 1 + sovGateway(uint64(m.Length)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadFileResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadDirRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.DirPath) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.IncludePattern) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReadDirResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatFileRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ref) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StatFileResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stat != nil { + l = m.Stat.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PingRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PongResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.FrontendAPICaps) > 0 { + for _, e := range m.FrontendAPICaps { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if len(m.LLBCaps) > 0 { + for _, e := range m.LLBCaps { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if len(m.Workers) > 0 { + for _, e := range m.Workers { l = e.Size() n += 1 + l + sovGateway(uint64(l)) } } - if len(m.LLBCaps) > 0 { - for _, e := range m.LLBCaps { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n 
+} + +func (m *NewContainerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.Network != 0 { + n += 1 + sovGateway(uint64(m.Network)) + } + if m.Platform != nil { + l = m.Platform.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.Constraints != nil { + l = m.Constraints.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *NewContainerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReleaseContainerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ReleaseContainerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ProcessID) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Input != nil { + n += m.Input.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExecMessage_Init) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Init != nil { + l = m.Init.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *ExecMessage_File) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *ExecMessage_Resize) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Resize != nil { + l = m.Resize.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *ExecMessage_Started) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Started != nil { + l = m.Started.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *ExecMessage_Exit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exit != nil { + l = m.Exit.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *ExecMessage_Done) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Done != nil { + l = m.Done.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} +func (m *InitMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Meta != nil { + l = m.Meta.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.Fds) > 0 { + l = 0 + for _, e := range m.Fds { + l += sovGateway(uint64(e)) + } + n += 1 + sovGateway(uint64(l)) + l + } + if m.Tty { + n += 2 + } + if m.Security != 0 { + n += 1 + sovGateway(uint64(m.Security)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ExitMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovGateway(uint64(m.Code)) + } + if m.Error != nil { + l = m.Error.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if 
m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StartedMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *DoneMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *FdMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Fd != 0 { + n += 1 + sovGateway(uint64(m.Fd)) + } + if m.EOF { + n += 2 + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResizeMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rows != 0 { + n += 1 + sovGateway(uint64(m.Rows)) + } + if m.Cols != 0 { + n += 1 + sovGateway(uint64(m.Cols)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovGateway(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGateway(x uint64) (n int) { + return sovGateway(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Result) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Result: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Result: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefDeprecated", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Result = &Result_RefDeprecated{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefsDeprecated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RefMapDeprecated{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &Result_RefsDeprecated{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Ref{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &Result_Ref{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RefMap{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &Result_Refs{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGateway + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGateway + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, 
dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefMapDeprecated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefMapDeprecated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefMapDeprecated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Refs == nil { + m.Refs = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := 
int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Refs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ref) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ref: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ref: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Def == nil { + m.Def = &pb.Definition{} + } + if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RefMap) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RefMap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RefMap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Refs == nil { + m.Refs = make(map[string]*Ref) + } + var mapkey string + var mapvalue *Ref + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGateway + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGateway + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Ref{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Refs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReturnRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReturnRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReturnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &Result{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Error == nil { + m.Error = &rpc.Status{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReturnResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReturnResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReturnResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InputsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InputsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InputsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy } } - if len(m.Workers) > 0 { - for _, e := range m.Workers { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InputsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InputsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InputsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Definitions == nil { + m.Definitions = make(map[string]*pb.Definition) + } + var mapkey string + var mapvalue *pb.Definition + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGateway + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGateway + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGateway + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGateway + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &pb.Definition{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Definitions[mapkey] = mapvalue + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy } } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} -func sovGateway(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGateway(x uint64) (n int) { - return sovGateway(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *Result) Unmarshal(dAtA []byte) error { +func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3554,15 +6517,15 @@ func (m *Result) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Result: wiretype end group for non-group") + return fmt.Errorf("proto: ResolveImageConfigRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Result: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResolveImageConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RefDeprecated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3590,11 +6553,11 @@ func (m *Result) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Result = &Result_RefDeprecated{string(dAtA[iNdEx:postIndex])} + m.Ref = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RefsDeprecated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3621,17 +6584,18 @@ func (m *Result) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RefMapDeprecated{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Platform == nil { + m.Platform = &pb.Platform{} + } + if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Result = &Result_RefsDeprecated{v} iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResolveMode", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -3641,32 +6605,29 @@ func (m *Result) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - v := &Ref{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Result = &Result_Ref{v} + m.ResolveMode = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LogName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -3676,32 +6637,83 @@ func (m *Result) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - v := &RefMap{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.LogName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { return err } - m.Result = &Result_Refs{v} - iNdEx = postIndex - case 10: + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveImageConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveImageConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -3711,119 +6723,57 @@ func (m *Result) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[string][]byte) + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var 
byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGateway - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGateway - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex < 0 { - return ErrInvalidLengthGateway - } - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = append(m.Config[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Config == nil { + m.Config = []byte{} } - m.Metadata[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -3850,7 +6800,7 @@ func (m *Result) Unmarshal(dAtA []byte) error { } return nil } -func (m *RefMapDeprecated) Unmarshal(dAtA []byte) error { +func (m *SolveRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3873,15 +6823,15 @@ func (m *RefMapDeprecated) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RefMapDeprecated: wiretype end group for non-group") + return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RefMapDeprecated: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3908,8 +6858,76 @@ func (m *RefMapDeprecated) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Refs == nil { - m.Refs = make(map[string]string) + if m.Definition == nil { + m.Definition = &pb.Definition{} + } + if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Frontend = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FrontendOpt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FrontendOpt == nil { + m.FrontendOpt = make(map[string]string) } var mapkey string var mapvalue string @@ -4004,65 +7022,11 @@ func (m *RefMapDeprecated) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Refs[mapkey] = mapvalue + m.FrontendOpt[mapkey] = mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Ref) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Ref: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Ref: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImportCacheRefsDeprecated", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4090,13 +7054,73 @@ func (m *Ref) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Id = string(dAtA[iNdEx:postIndex]) + m.ImportCacheRefsDeprecated = append(m.ImportCacheRefsDeprecated, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowResultReturn", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowResultReturn = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowResultArrayRef", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowResultArrayRef = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Final", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Final = bool(v != 0) + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttr", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -4106,85 +7130,63 @@ func (m *Ref) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Def == nil { - m.Def = &pb.Definition{} - } - if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.ExporterAttr = append(m.ExporterAttr[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ExporterAttr == nil { + m.ExporterAttr = []byte{} } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CacheImports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if skippy < 0 { + if msglen < 0 { return ErrInvalidLengthGateway } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGateway } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RefMap) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.CacheImports = append(m.CacheImports, &CacheOptionsEntry{}) + if err := m.CacheImports[len(m.CacheImports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RefMap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RefMap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FrontendInputs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4211,11 +7213,11 @@ func (m *RefMap) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Refs == nil { - m.Refs = make(map[string]*Ref) + if m.FrontendInputs == nil { + m.FrontendInputs = make(map[string]*pb.Definition) } var mapkey string - var mapvalue *Ref + var mapvalue *pb.Definition for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -4285,212 +7287,32 @@ func (m *RefMap) Unmarshal(dAtA []byte) error { postmsgIndex := iNdEx + mapmsglen if postmsgIndex < 0 { return ErrInvalidLengthGateway - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Ref{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Refs[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReturnRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReturnRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReturnRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &Result{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Error == nil { - m.Error = &rpc.Status{} - } - if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReturnResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &pb.Definition{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReturnResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReturnResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.FrontendInputs[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -4516,7 +7338,7 @@ func (m *ReturnResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *InputsRequest) Unmarshal(dAtA []byte) error { +func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4539,69 +7361,47 @@ func (m *InputsRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: InputsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: CacheOptionsEntry: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: InputsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CacheOptionsEntry: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - if skippy < 0 { - return ErrInvalidLengthGateway + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGateway } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InputsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InputsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InputsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Definitions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4628,11 +7428,11 @@ func (m *InputsResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Definitions == nil { - m.Definitions = make(map[string]*pb.Definition) + if m.Attrs == nil { + m.Attrs = make(map[string]string) } var mapkey string - var mapvalue *pb.Definition + var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -4681,7 +7481,7 @@ func (m *InputsResponse) Unmarshal(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var mapmsglen int + var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -4691,26 +7491,24 @@ func (m *InputsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= int(b&0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if mapmsglen < 0 { + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { return ErrInvalidLengthGateway } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { return ErrInvalidLengthGateway } - if postmsgIndex > l { + if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - mapvalue = &pb.Definition{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -4726,7 +7524,129 @@ func (m *InputsResponse) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Definitions[mapkey] = mapvalue + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SolveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Result == nil { + m.Result = &Result{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -4753,7 +7673,7 @@ func (m *InputsResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { +func (m *ReadFileRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4776,10 +7696,10 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResolveImageConfigRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReadFileRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResolveImageConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadFileRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -4816,7 +7736,39 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4843,18 +7795,72 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Platform == nil { - m.Platform = &pb.Platform{} + if m.Range == nil { + m.Range = &FileRange{} } - if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResolveMode", wireType) + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err } - var stringLen uint64 + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + m.Offset = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -4864,29 +7870,89 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Offset |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGateway } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.ResolveMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadFileResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadFileResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LogName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -4896,23 +7962,25 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - m.LogName = string(dAtA[iNdEx:postIndex]) + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -4939,7 +8007,7 @@ func (m *ResolveImageConfigRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { +func (m *ReadDirRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4949,28 +8017,60 @@ func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { if shift >= 64 { return ErrIntOverflowGateway } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadDirRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadDirRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if postIndex > l { + return io.ErrUnexpectedEOF } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResolveImageConfigResponse: 
wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResolveImageConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DirPath", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4998,13 +8098,13 @@ func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + m.DirPath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IncludePattern", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -5014,25 +8114,23 @@ func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - m.Config = append(m.Config[:0], dAtA[iNdEx:postIndex]...) - if m.Config == nil { - m.Config = []byte{} - } + m.IncludePattern = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5059,7 +8157,7 @@ func (m *ResolveImageConfigResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *SolveRequest) Unmarshal(dAtA []byte) error { +func (m *ReadDirResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5082,15 +8180,15 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ReadDirResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ReadDirResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5117,175 +8215,68 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Definition == nil { - m.Definition = &pb.Definition{} - } - if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Entries = append(m.Entries, &types.Stat{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if skippy < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGateway } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Frontend = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendOpt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatFileRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.FrontendOpt == nil { - m.FrontendOpt = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGateway - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGateway - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGateway - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.FrontendOpt[mapkey] = mapvalue - iNdEx = postIndex - case 4: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatFileRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatFileRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportCacheRefsDeprecated", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5313,13 +8304,13 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ImportCacheRefsDeprecated = append(m.ImportCacheRefsDeprecated, string(dAtA[iNdEx:postIndex])) + m.Ref = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowResultReturn", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -5329,37 +8320,83 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.AllowResultReturn = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowResultArrayRef", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway } - m.AllowResultArrayRef = bool(v != 0) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Final", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - var v int + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatFileResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatFileResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -5369,17 +8406,141 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Final = bool(v != 0) - case 11: + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stat == nil { + m.Stat = &types.Stat{} + } + if err := m.Stat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PingRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PongResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PongResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PongResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttr", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FrontendAPICaps", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -5389,29 +8550,29 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExporterAttr = append(m.ExporterAttr[:0], dAtA[iNdEx:postIndex]...) - if m.ExporterAttr == nil { - m.ExporterAttr = []byte{} + m.FrontendAPICaps = append(m.FrontendAPICaps, pb1.APICap{}) + if err := m.FrontendAPICaps[len(m.FrontendAPICaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CacheImports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LLBCaps", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5438,14 +8599,14 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.CacheImports = append(m.CacheImports, &CacheOptionsEntry{}) - if err := m.CacheImports[len(m.CacheImports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.LLBCaps = append(m.LLBCaps, pb1.APICap{}) + if err := m.LLBCaps[len(m.LLBCaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 13: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendInputs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5472,105 +8633,10 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.FrontendInputs == nil { - m.FrontendInputs = make(map[string]*pb.Definition) - } - var mapkey string - var mapvalue *pb.Definition - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGateway - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGateway - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGateway - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &pb.Definition{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.Workers = append(m.Workers, &types1.WorkerRecord{}) + if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.FrontendInputs[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -5597,7 +8663,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { +func (m *NewContainerRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5620,15 +8686,15 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CacheOptionsEntry: wiretype end group for non-group") + return fmt.Errorf("proto: NewContainerRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CacheOptionsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NewContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5656,11 +8722,11 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5687,103 +8753,101 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Attrs == nil { - m.Attrs = make(map[string]string) + m.Mounts = append(m.Mounts, &pb.Mount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var 
mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + m.Network = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGateway - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGateway - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGateway - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Network |= pb.NetMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Platform == nil { + m.Platform = &pb.Platform{} + } + if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Attrs[mapkey] = mapvalue 
+ if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Constraints == nil { + m.Constraints = &pb.WorkerConstraints{} + } + if err := m.Constraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5810,7 +8874,7 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { } return nil } -func (m *SolveResponse) Unmarshal(dAtA []byte) error { +func (m *NewContainerResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5833,15 +8897,69 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") + return fmt.Errorf("proto: NewContainerResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NewContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReleaseContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReleaseContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReleaseContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5869,44 +8987,62 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ref = string(dAtA[iNdEx:postIndex]) + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if skippy < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (iNdEx + skippy) < 0 { return ErrInvalidLengthGateway } - if postIndex > l { + if (iNdEx + skippy) > 
l { return io.ErrUnexpectedEOF } - if m.Result == nil { - m.Result = &Result{} + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReleaseContainerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReleaseContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReleaseContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -5932,7 +9068,7 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReadFileRequest) Unmarshal(dAtA []byte) error { +func (m *ExecMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5955,15 +9091,15 @@ func (m *ReadFileRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReadFileRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExecMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReadFileRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProcessID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5991,13 +9127,13 @@ func (m *ReadFileRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ref = string(dAtA[iNdEx:postIndex]) + m.ProcessID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Init", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6007,27 +9143,30 @@ func (m *ReadFileRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - m.FilePath = string(dAtA[iNdEx:postIndex]) + v := &InitMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Input = &ExecMessage_Init{v} iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) } var 
msglen int for shift := uint(0); ; shift += 7 { @@ -6054,72 +9193,52 @@ func (m *ReadFileRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Range == nil { - m.Range = &FileRange{} - } - if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &FdMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Input = &ExecMessage_File{v} iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + if msglen < 0 { + return ErrInvalidLengthGateway } - m.Offset = 0 + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResizeMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Input = &ExecMessage_Resize{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6129,16 +9248,32 @@ func (m *FileRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Offset |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + if msglen < 0 { + return ErrInvalidLengthGateway } - m.Length = 0 + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StartedMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Input = &ExecMessage_Started{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exit", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6148,70 +9283,32 @@ func (m *FileRange) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Length |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if msglen < 0 { return ErrInvalidLengthGateway } - if (iNdEx + skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGateway } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadFileResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + v := &ExitMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadFileResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Input = &ExecMessage_Exit{v} + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Done", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6221,25 +9318,26 @@ func (m *ReadFileResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} + v := &DoneMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + m.Input = &ExecMessage_Done{v} iNdEx = postIndex default: iNdEx = preIndex @@ -6266,7 +9364,7 @@ func (m *ReadFileResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *ReadDirRequest) Unmarshal(dAtA []byte) error { +func (m *InitMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6289,15 +9387,15 @@ func (m *ReadDirRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ReadDirRequest: wiretype end group for non-group") + return fmt.Errorf("proto: InitMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ReadDirRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InitMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6325,13 +9423,13 @@ func (m *ReadDirRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Ref = string(dAtA[iNdEx:postIndex]) + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DirPath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6341,115 +9439,109 @@ func (m *ReadDirRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - m.DirPath = string(dAtA[iNdEx:postIndex]) + if m.Meta == nil { + m.Meta = &pb.Meta{} + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludePattern", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.Fds = append(m.Fds, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - 
if b < 0x80 { - break + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } } + elementCount = count + if elementCount != 0 && len(m.Fds) == 0 { + m.Fds = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Fds = append(m.Fds, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Fds", wireType) } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IncludePattern = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadDirResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadDirResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadDirResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) - } - var msglen int + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Tty", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6459,26 +9551,31 @@ func (m *ReadDirResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Tty = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Security", wireType) } - m.Entries = append(m.Entries, &types.Stat{}) - if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Security = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Security |= pb.SecurityMode(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -6504,7 +9601,7 @@ func (m *ReadDirResponse) Unmarshal(dAtA []byte) error { 
} return nil } -func (m *StatFileRequest) Unmarshal(dAtA []byte) error { +func (m *ExitMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6527,17 +9624,17 @@ func (m *StatFileRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StatFileRequest: wiretype end group for non-group") + return fmt.Errorf("proto: ExitMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StatFileRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExitMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) } - var stringLen uint64 + m.Code = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6547,29 +9644,16 @@ func (m *StatFileRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Code |= uint32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6579,23 +9663,27 @@ func (m *StatFileRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if m.Error == nil { + m.Error = &rpc.Status{} + } + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -6622,7 +9710,7 @@ func (m *StatFileRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *StatFileResponse) Unmarshal(dAtA []byte) error { +func (m *StartedMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6645,48 +9733,12 @@ func (m *StatFileResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StatFileResponse: wiretype end group for non-group") + return fmt.Errorf("proto: StartedMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StatFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StartedMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stat == nil { - m.Stat = &types.Stat{} - } - if err := m.Stat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -6712,7 +9764,7 @@ func (m *StatFileResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *PingRequest) Unmarshal(dAtA []byte) error { +func (m *DoneMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6735,10 +9787,10 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") + return fmt.Errorf("proto: DoneMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DoneMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -6766,7 +9818,7 @@ func (m *PingRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *PongResponse) Unmarshal(dAtA []byte) error { +func (m *FdMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6789,17 +9841,17 @@ func (m *PongResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PongResponse: wiretype end group for non-group") + return fmt.Errorf("proto: FdMessage: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PongResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FdMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendAPICaps", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Fd", wireType) } - var msglen int + m.Fd = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6809,31 +9861,36 @@ func (m *PongResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Fd |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EOF", wireType) } - m.FrontendAPICaps = append(m.FrontendAPICaps, pb1.APICap{}) - if err := m.FrontendAPICaps[len(m.FrontendAPICaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 2: + m.EOF = bool(v != 0) + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LLBCaps", wireType) + return fmt.Errorf("proto: wrong wireType = %d for 
field Data", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6843,31 +9900,85 @@ func (m *PongResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthGateway } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthGateway } if postIndex > l { return io.ErrUnexpectedEOF } - m.LLBCaps = append(m.LLBCaps, pb1.APICap{}) - if err := m.LLBCaps[len(m.LLBCaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType) + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int + if skippy < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResizeMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResizeMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResizeMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) + } + m.Rows = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGateway @@ -6877,26 +9988,30 @@ func (m *PongResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Rows |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cols", wireType) } - m.Workers = append(m.Workers, &types1.WorkerRecord{}) - if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Cols = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cols |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto index 8b4725e2a6ae3a8e0795d44005fc1fe8fafcdaa8..ea870e297fd346a4b3cc5807f9bf50a1f6e7255c 100644 --- 
a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto @@ -28,6 +28,10 @@ service LLBBridge { rpc Return(ReturnRequest) returns (ReturnResponse); // apicaps:CapFrontendInputs rpc Inputs(InputsRequest) returns (InputsResponse); + + rpc NewContainer(NewContainerRequest) returns (NewContainerResponse); + rpc ReleaseContainer(ReleaseContainerRequest) returns (ReleaseContainerResponse); + rpc ExecProcess(stream ExecMessage) returns (stream ExecMessage); } message Result { @@ -162,3 +166,71 @@ message PongResponse{ repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false]; repeated moby.buildkit.v1.types.WorkerRecord Workers = 3; } + +message NewContainerRequest { + string ContainerID = 1; + // For mount input values we can use random identifiers passed with ref + repeated pb.Mount Mounts = 2; + pb.NetMode Network = 3; + pb.Platform platform = 4; + pb.WorkerConstraints constraints = 5; +} + +message NewContainerResponse{} + +message ReleaseContainerRequest { + string ContainerID = 1; +} + +message ReleaseContainerResponse{} + +message ExecMessage { + string ProcessID = 1; + oneof Input { + // InitMessage sent from client to server will start a new process in a + // container + InitMessage Init = 2; + // FdMessage used from client to server for input (stdin) and + // from server to client for output (stdout, stderr) + FdMessage File = 3; + // ResizeMessage used from client to server for terminal resize events + ResizeMessage Resize = 4; + // StartedMessage sent from server to client after InitMessage to + // indicate the process has started. + StartedMessage Started = 5; + // ExitMessage sent from server to client will contain the exit code + // when the process ends. + ExitMessage Exit = 6; + // DoneMessage from server to client will be the last message for any + // process. Note that FdMessage might be sent after ExitMessage. + DoneMessage Done = 7; + } +} + +message InitMessage{ + string ContainerID = 1; + pb.Meta Meta = 2; + repeated uint32 Fds = 3; + bool Tty = 4; + pb.SecurityMode Security = 5; +} + +message ExitMessage { + uint32 Code = 1; + google.rpc.Status Error = 2; +} + +message StartedMessage{} + +message DoneMessage{} + +message FdMessage{ + uint32 Fd = 1; // what fd the data was from + bool EOF = 2; // true if eof was reached + bytes Data = 3; +} + +message ResizeMessage{ + uint32 Rows = 1; + uint32 Cols = 2; +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go index 4ab07c6d4a75c0bc78aba3bc4e3620e6ef8b64be..e17b9daf6b1a2d51e497d9d8fa29f16bd284a22f 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go @@ -1,3 +1,3 @@ -package moby_buildkit_v1_frontend +package moby_buildkit_v1_frontend //nolint:golint //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. 
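The ExecMessage stream added to gateway.proto above multiplexes per-process traffic over one bidirectional RPC: the client opens a process with Init, exchanges FdMessage and ResizeMessage frames while it runs, and the server answers with Started, then Exit, then a final Done. A rough client-side sketch of the first messages such an exchange would carry, assuming the generated gateway package is imported as gwpb and the LLB ops package as pb; the function name and the concrete argument values are illustrative only:

package example

import (
	gwpb "github.com/moby/buildkit/frontend/gateway/pb"
	pb "github.com/moby/buildkit/solver/pb"
)

// execOpenMessages builds the messages a client would send first on the
// ExecProcess stream: an Init that starts a shell in an already-created
// container, followed by a terminal resize for the same ProcessID.
func execOpenMessages(containerID, processID string) []*gwpb.ExecMessage {
	initMsg := &gwpb.ExecMessage{
		ProcessID: processID,
		Input: &gwpb.ExecMessage_Init{
			Init: &gwpb.InitMessage{
				ContainerID: containerID,
				Meta: &pb.Meta{
					Args: []string{"/bin/sh"},
					Cwd:  "/",
				},
				Fds: []uint32{0, 1, 2}, // stdin/stdout/stderr, carried later as FdMessage frames
				Tty: true,
			},
		},
	}
	resizeMsg := &gwpb.ExecMessage{
		ProcessID: processID,
		Input: &gwpb.ExecMessage_Resize{
			Resize: &gwpb.ResizeMessage{Rows: 24, Cols: 80},
		},
	}
	return []*gwpb.ExecMessage{initMsg, resizeMsg}
}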
gateway.proto diff --git a/vendor/github.com/moby/buildkit/frontend/subrequests/describe.go b/vendor/github.com/moby/buildkit/frontend/subrequests/describe.go new file mode 100644 index 0000000000000000000000000000000000000000..cc8053ed24d654e20c58ca05f915c3d27598c7c1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/subrequests/describe.go @@ -0,0 +1,63 @@ +package subrequests + +import ( + "context" + "encoding/json" + + "github.com/moby/buildkit/frontend/gateway/client" + gwpb "github.com/moby/buildkit/frontend/gateway/pb" + "github.com/moby/buildkit/solver/errdefs" + "github.com/pkg/errors" +) + +const RequestSubrequestsDescribe = "frontend.subrequests.describe" + +var SubrequestsDescribeDefinition = Request{ + Name: RequestSubrequestsDescribe, + Version: "1.0.0", + Type: TypeRPC, + Description: "List available subrequest types", + Metadata: []Named{ + { + Name: "result.json", + }, + }, +} + +func Describe(ctx context.Context, c client.Client) ([]Request, error) { + gwcaps := c.BuildOpts().Caps + + if err := (&gwcaps).Supports(gwpb.CapFrontendCaps); err != nil { + return nil, errdefs.NewUnsupportedSubrequestError(RequestSubrequestsDescribe) + } + + res, err := c.Solve(ctx, client.SolveRequest{ + FrontendOpt: map[string]string{ + "requestid": RequestSubrequestsDescribe, + "frontend.caps": "moby.buildkit.frontend.subrequests", + }, + Frontend: "dockerfile.v0", + }) + if err != nil { + var reqErr *errdefs.UnsupportedSubrequestError + if errors.As(err, &reqErr) { + return nil, err + } + var capErr *errdefs.UnsupportedFrontendCapError + if errors.As(err, &capErr) { + return nil, errdefs.NewUnsupportedSubrequestError(RequestSubrequestsDescribe) + } + return nil, err + } + + dt, ok := res.Metadata["result.json"] + if !ok { + return nil, errors.Errorf("no result.json metadata in response") + } + + var reqs []Request + if err := json.Unmarshal(dt, &reqs); err != nil { + return nil, errors.Wrap(err, "failed to parse describe result") + } + return reqs, nil +} diff --git a/vendor/github.com/moby/buildkit/frontend/subrequests/types.go b/vendor/github.com/moby/buildkit/frontend/subrequests/types.go new file mode 100644 index 0000000000000000000000000000000000000000..db62afe1cdcae68ca273e32efd799a25275a4c4c --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/subrequests/types.go @@ -0,0 +1,21 @@ +package subrequests + +type Request struct { + Name string `json:"name"` + Version string `json:"version"` + Type RequestType `json:"type"` + Description string `json:"description"` + Opts []Named `json:"opts"` + Inputs []Named `json:"inputs"` + Metadata []Named `json:"metadata"` + Refs []Named `json:"refs"` +} + +type Named struct { + Name string `json:"name"` + Description string `json:"description"` +} + +type RequestType string + +const TypeRPC RequestType = "rpc" diff --git a/vendor/github.com/moby/buildkit/go.mod b/vendor/github.com/moby/buildkit/go.mod index f92a2b7ce7b1e9c09be2f8bfed91ea9e8e72754f..6736953a469a7846f5e90f88e18033bc7b226fae 100644 --- a/vendor/github.com/moby/buildkit/go.mod +++ b/vendor/github.com/moby/buildkit/go.mod @@ -3,76 +3,77 @@ module github.com/moby/buildkit go 1.13 require ( - github.com/AkihiroSuda/containerd-fuse-overlayfs v0.0.0-20200512015515-32086ef23a5a - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect + github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0 github.com/BurntSushi/toml v0.3.1 - github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 - github.com/apache/thrift 
v0.0.0-20161221203622-b2a4d4ae21c7 // indirect + github.com/Microsoft/go-winio v0.4.15-0.20200908182639-5b44b70ab3ab + github.com/Microsoft/hcsshim v0.8.9 github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect - github.com/containerd/cgroups v0.0.0-20200327175542-b44481373989 // indirect - github.com/containerd/console v1.0.0 - github.com/containerd/containerd v1.4.0-0 - github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb - github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b // indirect - github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75 - github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328 - github.com/coreos/go-systemd/v22 v22.0.0 - github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24 + github.com/containerd/console v1.0.1 + github.com/containerd/containerd v1.4.1-0.20200903181227-d4e78200d6da + github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe + github.com/containerd/go-cni v1.0.1 + github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0 + github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116 + github.com/containerd/typeurl v1.0.1 + github.com/coreos/go-systemd/v22 v22.1.0 + github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible github.com/docker/distribution v2.7.1+incompatible - github.com/docker/docker v0.0.0 - github.com/docker/docker-credential-helpers v0.6.0 // indirect - github.com/docker/go-connections v0.3.0 - github.com/docker/libnetwork v0.8.0-dev.2.0.20200226230617-d8334ccdb9be - github.com/gofrs/flock v0.7.0 + github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible + github.com/docker/go-connections v0.4.0 + github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f + github.com/gofrs/flock v0.7.3 github.com/gogo/googleapis v1.3.2 github.com/gogo/protobuf v1.3.1 - github.com/golang/protobuf v1.3.3 - github.com/google/go-cmp v0.4.0 - github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 - github.com/google/uuid v1.1.1 // indirect - github.com/gorilla/mux v1.7.4 // indirect + // protobuf: the actual version is replaced in replace() + github.com/golang/protobuf v1.4.2 + github.com/google/go-cmp v0.4.1 + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 + github.com/gorilla/mux v1.8.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/hashicorp/go-immutable-radix v1.0.0 - github.com/hashicorp/golang-lru v0.5.1 + github.com/hashicorp/golang-lru v0.5.3 github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect - github.com/imdario/mergo v0.3.9 // indirect github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea github.com/mitchellh/hashstructure v1.0.0 - github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c + github.com/moby/locker v1.0.1 + github.com/moby/sys/mount v0.1.1 // indirect; force more current version of sys/mount than go mod selects automatically + github.com/moby/sys/mountinfo v0.4.0 // indirect; force more current version of sys/mountinfo than go mod selects automatically + github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2 // indirect + github.com/morikuni/aec v1.0.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.0.1 - github.com/opencontainers/runc v1.0.0-rc10 - github.com/opencontainers/runtime-spec v1.0.2 - 
github.com/opencontainers/selinux v1.5.1 // indirect - github.com/opentracing-contrib/go-stdlib v0.0.0-20171029140428-b1a47cfbdd75 - github.com/opentracing/opentracing-go v1.1.0 + github.com/opencontainers/runc v1.0.0-rc92 + github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6 + github.com/opentracing-contrib/go-stdlib v1.0.0 + github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 - github.com/pkg/profile v1.2.1 + github.com/pkg/profile v1.5.0 github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 - github.com/sirupsen/logrus v1.4.2 + github.com/sirupsen/logrus v1.7.0 github.com/stretchr/testify v1.5.1 - github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect - github.com/tonistiigi/fsutil v0.0.0-20200512175118-ae3a8d753069 + github.com/tonistiigi/fsutil v0.0.0-20200724193237-c3ed55f3b481 github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea - github.com/uber/jaeger-client-go v2.11.2+incompatible - github.com/uber/jaeger-lib v1.2.1 // indirect + github.com/uber/jaeger-client-go v2.25.0+incompatible + github.com/uber/jaeger-lib v2.2.0+incompatible // indirect github.com/urfave/cli v1.22.2 - github.com/vishvananda/netlink v1.1.0 // indirect - go.etcd.io/bbolt v1.3.3 - golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d - golang.org/x/net v0.0.0-20200226121028-0de0cce0169b - golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e - golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 - golang.org/x/time v0.0.0-20191024005414-555d28b269f0 - google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9 - google.golang.org/grpc v1.27.1 + go.etcd.io/bbolt v1.3.5 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 + golang.org/x/net v0.0.0-20200707034311-ab3426394381 + golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 + golang.org/x/sys v0.0.0-20200922070232-aee5d888a860 + golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 + // genproto: the actual version is replaced in replace() + google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece + google.golang.org/grpc v1.29.1 ) replace ( - github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200512144102-f13ba8f2f2fd - github.com/docker/docker => github.com/docker/docker v17.12.0-ce-rc1.0.20200310163718-4634ce647cf2+incompatible + // protobuf: corresponds to containerd + github.com/golang/protobuf => github.com/golang/protobuf v1.3.5 github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305 + // genproto: corresponds to containerd + google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 ) diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.go b/vendor/github.com/moby/buildkit/session/auth/auth.go index 6a65eb8dc752b397d8c8bf45332db9a549aea28f..864ed5dd7fa43adfb141c9937bf28fe276914af9 100644 --- a/vendor/github.com/moby/buildkit/session/auth/auth.go +++ b/vendor/github.com/moby/buildkit/session/auth/auth.go @@ -2,16 +2,33 @@ package auth import ( "context" + "crypto/subtle" + "math/rand" + "sync" "github.com/moby/buildkit/session" "github.com/moby/buildkit/util/grpcerrors" + "github.com/pkg/errors" + "golang.org/x/crypto/nacl/sign" "google.golang.org/grpc/codes" ) -func CredentialsFunc(sm *session.Manager, g session.Group) func(string) (string, string, error) { - return func(host string) (string, string, error) { - var user, secret 
string - err := sm.Any(context.TODO(), g, func(ctx context.Context, _ string, c session.Caller) error { +var salt []byte +var saltOnce sync.Once + +// getSalt returns unique component per daemon restart to avoid persistent keys +func getSalt() []byte { + saltOnce.Do(func() { + salt = make([]byte, 32) + rand.Read(salt) + }) + return salt +} + +func CredentialsFunc(sm *session.Manager, g session.Group) func(string) (session, username, secret string, err error) { + return func(host string) (string, string, string, error) { + var sessionID, user, secret string + err := sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error { client := NewAuthClient(c.Conn()) resp, err := client.Credentials(ctx, &CredentialsRequest{ @@ -23,13 +40,91 @@ func CredentialsFunc(sm *session.Manager, g session.Group) func(string) (string, } return err } + sessionID = id user = resp.Username secret = resp.Secret return nil }) if err != nil { - return "", "", err + return "", "", "", err + } + return sessionID, user, secret, nil + } +} + +func FetchToken(req *FetchTokenRequest, sm *session.Manager, g session.Group) (resp *FetchTokenResponse, err error) { + err = sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error { + client := NewAuthClient(c.Conn()) + + resp, err = client.FetchToken(ctx, req) + if err != nil { + return err + } + return nil + }) + if err != nil { + return nil, err + } + return resp, nil +} + +func VerifyTokenAuthority(host string, pubKey *[32]byte, sm *session.Manager, g session.Group) (sessionID string, ok bool, err error) { + var verified bool + err = sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error { + client := NewAuthClient(c.Conn()) + + payload := make([]byte, 32) + rand.Read(payload) + resp, err := client.VerifyTokenAuthority(ctx, &VerifyTokenAuthorityRequest{ + Host: host, + Salt: getSalt(), + Payload: payload, + }) + if err != nil { + if grpcerrors.Code(err) == codes.Unimplemented { + return nil + } + return err + } + var dt []byte + dt, ok = sign.Open(nil, resp.Signed, pubKey) + if ok && subtle.ConstantTimeCompare(dt, payload) == 1 { + verified = true + } + sessionID = id + return nil + }) + if err != nil { + return "", false, err + } + return sessionID, verified, nil +} + +func GetTokenAuthority(host string, sm *session.Manager, g session.Group) (sessionID string, pubKey *[32]byte, err error) { + err = sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error { + client := NewAuthClient(c.Conn()) + + resp, err := client.GetTokenAuthority(ctx, &GetTokenAuthorityRequest{ + Host: host, + Salt: getSalt(), + }) + if err != nil { + if grpcerrors.Code(err) == codes.Unimplemented || grpcerrors.Code(err) == codes.Unavailable { + return nil + } + return err + } + if len(resp.PublicKey) != 32 { + return errors.Errorf("invalid pubkey length %d", len(pubKey)) } - return user, secret, nil + + sessionID = id + pubKey = new([32]byte) + copy((*pubKey)[:], resp.PublicKey) + return nil + }) + if err != nil { + return "", nil, err } + return sessionID, pubKey, nil } diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.pb.go b/vendor/github.com/moby/buildkit/session/auth/auth.pb.go index 04afb3d6958cc28642c21fa80dc770874fd16fd7..ffe60e93dfcdb0f645a47cb8d68f94780872ee30 100644 --- a/vendor/github.com/moby/buildkit/session/auth/auth.pb.go +++ b/vendor/github.com/moby/buildkit/session/auth/auth.pb.go @@ -4,6 +4,7 @@ package auth import ( + bytes "bytes" context "context" fmt 
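The VerifyTokenAuthority flow added above boils down to a nacl/sign challenge: the daemon sends a random payload to the session, the session signs it, and the daemon opens the signature with the previously advertised public key and compares the recovered payload in constant time. A minimal standalone sketch of that handshake, assuming a freshly generated key pair (the real auth provider may derive its keys differently and mixes the per-restart salt into the exchange):

package main

import (
	"crypto/rand"
	"crypto/subtle"
	"fmt"

	"golang.org/x/crypto/nacl/sign"
)

func main() {
	// Session/provider side: key pair whose public half would be returned
	// in GetTokenAuthorityResponse.PublicKey.
	pub, priv, err := sign.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Daemon side: random challenge payload, as in VerifyTokenAuthority.
	payload := make([]byte, 32)
	if _, err := rand.Read(payload); err != nil {
		panic(err)
	}

	// Provider side: sign the challenge (VerifyTokenAuthorityResponse.Signed).
	signed := sign.Sign(nil, payload, priv)

	// Daemon side: open with the advertised public key and compare the
	// recovered payload in constant time.
	opened, ok := sign.Open(nil, signed, pub)
	if ok && subtle.ConstantTimeCompare(opened, payload) == 1 {
		fmt.Println("token authority verified")
	}
}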
"fmt" proto "github.com/gogo/protobuf/proto" @@ -122,30 +123,384 @@ func (m *CredentialsResponse) GetSecret() string { return "" } +type FetchTokenRequest struct { + ClientID string `protobuf:"bytes,1,opt,name=ClientID,proto3" json:"ClientID,omitempty"` + Host string `protobuf:"bytes,2,opt,name=Host,proto3" json:"Host,omitempty"` + Realm string `protobuf:"bytes,3,opt,name=Realm,proto3" json:"Realm,omitempty"` + Service string `protobuf:"bytes,4,opt,name=Service,proto3" json:"Service,omitempty"` + Scopes []string `protobuf:"bytes,5,rep,name=Scopes,proto3" json:"Scopes,omitempty"` +} + +func (m *FetchTokenRequest) Reset() { *m = FetchTokenRequest{} } +func (*FetchTokenRequest) ProtoMessage() {} +func (*FetchTokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{2} +} +func (m *FetchTokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FetchTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FetchTokenRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FetchTokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FetchTokenRequest.Merge(m, src) +} +func (m *FetchTokenRequest) XXX_Size() int { + return m.Size() +} +func (m *FetchTokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FetchTokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FetchTokenRequest proto.InternalMessageInfo + +func (m *FetchTokenRequest) GetClientID() string { + if m != nil { + return m.ClientID + } + return "" +} + +func (m *FetchTokenRequest) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *FetchTokenRequest) GetRealm() string { + if m != nil { + return m.Realm + } + return "" +} + +func (m *FetchTokenRequest) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +func (m *FetchTokenRequest) GetScopes() []string { + if m != nil { + return m.Scopes + } + return nil +} + +type FetchTokenResponse struct { + Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` + ExpiresIn int64 `protobuf:"varint,2,opt,name=ExpiresIn,proto3" json:"ExpiresIn,omitempty"` + IssuedAt int64 `protobuf:"varint,3,opt,name=IssuedAt,proto3" json:"IssuedAt,omitempty"` +} + +func (m *FetchTokenResponse) Reset() { *m = FetchTokenResponse{} } +func (*FetchTokenResponse) ProtoMessage() {} +func (*FetchTokenResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{3} +} +func (m *FetchTokenResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FetchTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FetchTokenResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FetchTokenResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FetchTokenResponse.Merge(m, src) +} +func (m *FetchTokenResponse) XXX_Size() int { + return m.Size() +} +func (m *FetchTokenResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FetchTokenResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FetchTokenResponse proto.InternalMessageInfo + +func (m *FetchTokenResponse) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + +func (m *FetchTokenResponse) GetExpiresIn() 
int64 { + if m != nil { + return m.ExpiresIn + } + return 0 +} + +func (m *FetchTokenResponse) GetIssuedAt() int64 { + if m != nil { + return m.IssuedAt + } + return 0 +} + +type GetTokenAuthorityRequest struct { + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + Salt []byte `protobuf:"bytes,2,opt,name=Salt,proto3" json:"Salt,omitempty"` +} + +func (m *GetTokenAuthorityRequest) Reset() { *m = GetTokenAuthorityRequest{} } +func (*GetTokenAuthorityRequest) ProtoMessage() {} +func (*GetTokenAuthorityRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{4} +} +func (m *GetTokenAuthorityRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetTokenAuthorityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetTokenAuthorityRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetTokenAuthorityRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTokenAuthorityRequest.Merge(m, src) +} +func (m *GetTokenAuthorityRequest) XXX_Size() int { + return m.Size() +} +func (m *GetTokenAuthorityRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetTokenAuthorityRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTokenAuthorityRequest proto.InternalMessageInfo + +func (m *GetTokenAuthorityRequest) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *GetTokenAuthorityRequest) GetSalt() []byte { + if m != nil { + return m.Salt + } + return nil +} + +type GetTokenAuthorityResponse struct { + PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"` +} + +func (m *GetTokenAuthorityResponse) Reset() { *m = GetTokenAuthorityResponse{} } +func (*GetTokenAuthorityResponse) ProtoMessage() {} +func (*GetTokenAuthorityResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{5} +} +func (m *GetTokenAuthorityResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetTokenAuthorityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetTokenAuthorityResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GetTokenAuthorityResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetTokenAuthorityResponse.Merge(m, src) +} +func (m *GetTokenAuthorityResponse) XXX_Size() int { + return m.Size() +} +func (m *GetTokenAuthorityResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetTokenAuthorityResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetTokenAuthorityResponse proto.InternalMessageInfo + +func (m *GetTokenAuthorityResponse) GetPublicKey() []byte { + if m != nil { + return m.PublicKey + } + return nil +} + +type VerifyTokenAuthorityRequest struct { + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=Payload,proto3" json:"Payload,omitempty"` + Salt []byte `protobuf:"bytes,3,opt,name=Salt,proto3" json:"Salt,omitempty"` +} + +func (m *VerifyTokenAuthorityRequest) Reset() { *m = VerifyTokenAuthorityRequest{} } +func (*VerifyTokenAuthorityRequest) ProtoMessage() {} +func (*VerifyTokenAuthorityRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, 
[]int{6} +} +func (m *VerifyTokenAuthorityRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VerifyTokenAuthorityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VerifyTokenAuthorityRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VerifyTokenAuthorityRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyTokenAuthorityRequest.Merge(m, src) +} +func (m *VerifyTokenAuthorityRequest) XXX_Size() int { + return m.Size() +} +func (m *VerifyTokenAuthorityRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VerifyTokenAuthorityRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VerifyTokenAuthorityRequest proto.InternalMessageInfo + +func (m *VerifyTokenAuthorityRequest) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *VerifyTokenAuthorityRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func (m *VerifyTokenAuthorityRequest) GetSalt() []byte { + if m != nil { + return m.Salt + } + return nil +} + +type VerifyTokenAuthorityResponse struct { + Signed []byte `protobuf:"bytes,1,opt,name=Signed,proto3" json:"Signed,omitempty"` +} + +func (m *VerifyTokenAuthorityResponse) Reset() { *m = VerifyTokenAuthorityResponse{} } +func (*VerifyTokenAuthorityResponse) ProtoMessage() {} +func (*VerifyTokenAuthorityResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8bbd6f3875b0e874, []int{7} +} +func (m *VerifyTokenAuthorityResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VerifyTokenAuthorityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VerifyTokenAuthorityResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VerifyTokenAuthorityResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyTokenAuthorityResponse.Merge(m, src) +} +func (m *VerifyTokenAuthorityResponse) XXX_Size() int { + return m.Size() +} +func (m *VerifyTokenAuthorityResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VerifyTokenAuthorityResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VerifyTokenAuthorityResponse proto.InternalMessageInfo + +func (m *VerifyTokenAuthorityResponse) GetSigned() []byte { + if m != nil { + return m.Signed + } + return nil +} + func init() { proto.RegisterType((*CredentialsRequest)(nil), "moby.filesync.v1.CredentialsRequest") proto.RegisterType((*CredentialsResponse)(nil), "moby.filesync.v1.CredentialsResponse") + proto.RegisterType((*FetchTokenRequest)(nil), "moby.filesync.v1.FetchTokenRequest") + proto.RegisterType((*FetchTokenResponse)(nil), "moby.filesync.v1.FetchTokenResponse") + proto.RegisterType((*GetTokenAuthorityRequest)(nil), "moby.filesync.v1.GetTokenAuthorityRequest") + proto.RegisterType((*GetTokenAuthorityResponse)(nil), "moby.filesync.v1.GetTokenAuthorityResponse") + proto.RegisterType((*VerifyTokenAuthorityRequest)(nil), "moby.filesync.v1.VerifyTokenAuthorityRequest") + proto.RegisterType((*VerifyTokenAuthorityResponse)(nil), "moby.filesync.v1.VerifyTokenAuthorityResponse") } func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) } var fileDescriptor_8bbd6f3875b0e874 = []byte{ - // 233 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x2c, 0x2d, 0xc9, - 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0xcb, 0xcc, - 0x49, 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x33, 0x54, 0xd2, 0xe0, 0x12, 0x72, 0x2e, 0x4a, 0x4d, - 0x49, 0xcd, 0x2b, 0xc9, 0x4c, 0xcc, 0x29, 0x0e, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x11, 0x12, - 0xe2, 0x62, 0xf1, 0xc8, 0x2f, 0x2e, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x95, - 0x3c, 0xb9, 0x84, 0x51, 0x54, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x0a, 0x49, 0x71, 0x71, 0x84, - 0x16, 0xa7, 0x16, 0xe5, 0x25, 0xe6, 0xa6, 0x42, 0x95, 0xc3, 0xf9, 0x42, 0x62, 0x5c, 0x6c, 0xc1, - 0xa9, 0xc9, 0x45, 0xa9, 0x25, 0x12, 0x4c, 0x60, 0x19, 0x28, 0xcf, 0x28, 0x89, 0x8b, 0xc5, 0xb1, - 0xb4, 0x24, 0x43, 0x28, 0x8a, 0x8b, 0x1b, 0xc9, 0x48, 0x21, 0x15, 0x3d, 0x74, 0xe7, 0xe9, 0x61, - 0xba, 0x4d, 0x4a, 0x95, 0x80, 0x2a, 0x88, 0xbb, 0x9c, 0xac, 0x2e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, - 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, - 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, - 0x7c, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, - 0x31, 0x44, 0xb1, 0x80, 0x02, 0x2b, 0x89, 0x0d, 0x1c, 0x5a, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x64, 0x61, 0x71, 0x59, 0x3b, 0x01, 0x00, 0x00, + // 513 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0xbd, 0x75, 0xd2, 0x36, 0x43, 0x0f, 0x74, 0x89, 0x90, 0x31, 0xd1, 0xaa, 0x32, 0x45, + 0xaa, 0x40, 0x58, 0x02, 0x24, 0x24, 0xb8, 0xb5, 0xe5, 0x2b, 0xe2, 0x52, 0x39, 0x7c, 0x48, 0xbd, + 0x20, 0xc7, 0x9e, 0x12, 0x0b, 0xc7, 0x0e, 0xde, 0x75, 0x85, 0x6f, 0xdc, 0xb9, 0xf0, 0x08, 0x1c, + 0x79, 0x14, 0x8e, 0x39, 0xf6, 0x48, 0x9c, 0x0b, 0xc7, 0x3c, 0x02, 0xf2, 0x66, 0x9d, 0x04, 0x1c, + 0xd2, 0xdc, 0xfc, 0x1f, 0xff, 0x77, 0xe6, 0xb7, 0x33, 0xa3, 0x05, 0x70, 0x53, 0xd1, 0xb3, 0x07, + 0x49, 0x2c, 0x62, 0x7a, 0xb5, 0x1f, 0x77, 0x33, 0xfb, 0x2c, 0x08, 0x91, 0x67, 0x91, 0x67, 0x9f, + 0xdf, 0xb7, 0x0e, 0x80, 0x1e, 0x27, 0xe8, 0x63, 0x24, 0x02, 0x37, 0xe4, 0x0e, 0x7e, 0x4a, 0x91, + 0x0b, 0x4a, 0xa1, 0xf6, 0x32, 0xe6, 0xc2, 0x20, 0x7b, 0xe4, 0xa0, 0xe1, 0xc8, 0x6f, 0xab, 0x0d, + 0xd7, 0xfe, 0x72, 0xf2, 0x41, 0x1c, 0x71, 0xa4, 0x26, 0x6c, 0xbf, 0xe1, 0x98, 0x44, 0x6e, 0x1f, + 0x95, 0x7d, 0xa6, 0xe9, 0x75, 0xd8, 0xec, 0xa0, 0x97, 0xa0, 0x30, 0x36, 0xe4, 0x1f, 0xa5, 0xac, + 0xaf, 0x04, 0x76, 0x9f, 0xa3, 0xf0, 0x7a, 0xaf, 0xe3, 0x8f, 0x18, 0x95, 0x45, 0x4d, 0xd8, 0x3e, + 0x0e, 0x03, 0x8c, 0x44, 0xfb, 0x69, 0x99, 0xa9, 0xd4, 0x33, 0xa0, 0x8d, 0x39, 0x10, 0x6d, 0x42, + 0xdd, 0x41, 0x37, 0xec, 0x1b, 0xba, 0x0c, 0x4e, 0x05, 0x35, 0x60, 0xab, 0x83, 0xc9, 0x79, 0xe0, + 0xa1, 0x51, 0x93, 0xf1, 0x52, 0x4a, 0x1a, 0x2f, 0x1e, 0x20, 0x37, 0xea, 0x7b, 0xba, 0xa4, 0x91, + 0xca, 0xf2, 0x81, 0x2e, 0xc2, 0xa8, 0x7b, 0x35, 0xa1, 0x2e, 0x03, 0x0a, 0x65, 0x2a, 0x68, 0x0b, + 0x1a, 0xcf, 0x3e, 0x0f, 0x82, 0x04, 0x79, 0x3b, 0x92, 0x30, 0xba, 0x33, 0x0f, 0x14, 0x37, 0x68, + 0x73, 0x9e, 0xa2, 0x7f, 0x28, 0x24, 0x94, 0xee, 0xcc, 0xb4, 0x75, 0x04, 0xc6, 0x0b, 0x14, 0x32, + 0xcb, 0x61, 0x2a, 0x7a, 0x71, 0x12, 0x88, 0x6c, 0x45, 0xbb, 0x8b, 0x58, 0xc7, 0x0d, 0xa7, 0x37, + 0xde, 0x71, 0xe4, 0xb7, 0xf5, 0x18, 0x6e, 0x2c, 0xc9, 0xa1, 0x80, 0x5b, 0xd0, 0x38, 0x49, 0xbb, + 0x61, 0xe0, 0xbd, 0xc2, 0x4c, 0x66, 0xda, 0x71, 0xe6, 0x01, 0xeb, 0x3d, 0xdc, 0x7c, 0x8b, 0x49, + 0x70, 0x96, 0xad, 0x4f, 0x60, 
0xc0, 0xd6, 0x89, 0x9b, 0x85, 0xb1, 0xeb, 0x2b, 0x88, 0x52, 0xce, + 0xd8, 0xf4, 0x05, 0xb6, 0x47, 0xd0, 0x5a, 0x5e, 0x40, 0xe1, 0x15, 0xdd, 0x0f, 0x3e, 0x44, 0xe8, + 0x2b, 0x36, 0xa5, 0x1e, 0x7c, 0xd7, 0xa1, 0x56, 0xb8, 0xe9, 0x29, 0x5c, 0x59, 0xd8, 0x2f, 0xba, + 0x6f, 0xff, 0xbb, 0xab, 0x76, 0x75, 0x51, 0xcd, 0xdb, 0x97, 0xb8, 0x54, 0xf1, 0x77, 0x00, 0xf3, + 0x11, 0xd3, 0x5b, 0xd5, 0x43, 0x95, 0x6d, 0x34, 0xf7, 0x57, 0x9b, 0x54, 0xe2, 0x10, 0x76, 0x2b, + 0x13, 0xa1, 0x77, 0xaa, 0x47, 0xff, 0x37, 0x7a, 0xf3, 0xee, 0x5a, 0x5e, 0x55, 0x2d, 0x85, 0xe6, + 0xb2, 0x1e, 0xd3, 0x7b, 0xd5, 0x24, 0x2b, 0x86, 0x6d, 0xda, 0xeb, 0xda, 0xa7, 0x65, 0x8f, 0x9e, + 0x0c, 0x47, 0x4c, 0xbb, 0x18, 0x31, 0x6d, 0x32, 0x62, 0xe4, 0x4b, 0xce, 0xc8, 0x8f, 0x9c, 0x91, + 0x9f, 0x39, 0x23, 0xc3, 0x9c, 0x91, 0x5f, 0x39, 0x23, 0xbf, 0x73, 0xa6, 0x4d, 0x72, 0x46, 0xbe, + 0x8d, 0x99, 0x36, 0x1c, 0x33, 0xed, 0x62, 0xcc, 0xb4, 0xd3, 0x5a, 0xf1, 0xee, 0x74, 0x37, 0xe5, + 0xc3, 0xf3, 0xf0, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xba, 0xb3, 0x18, 0x70, 0x86, 0x04, 0x00, + 0x00, } func (this *CredentialsRequest) Equal(that interface{}) bool { @@ -199,34 +554,279 @@ func (this *CredentialsResponse) Equal(that interface{}) bool { } return true } -func (this *CredentialsRequest) GoString() string { - if this == nil { - return "nil" +func (this *FetchTokenRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil } - s := make([]string, 0, 5) - s = append(s, "&auth.CredentialsRequest{") - s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CredentialsResponse) GoString() string { - if this == nil { - return "nil" + + that1, ok := that.(*FetchTokenRequest) + if !ok { + that2, ok := that.(FetchTokenRequest) + if ok { + that1 = &that2 + } else { + return false + } } - s := make([]string, 0, 6) - s = append(s, "&auth.CredentialsResponse{") - s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n") - s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringAuth(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + if that1 == nil { + return this == nil + } else if this == nil { + return false } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) + if this.ClientID != that1.ClientID { + return false + } + if this.Host != that1.Host { + return false + } + if this.Realm != that1.Realm { + return false + } + if this.Service != that1.Service { + return false + } + if len(this.Scopes) != len(that1.Scopes) { + return false + } + for i := range this.Scopes { + if this.Scopes[i] != that1.Scopes[i] { + return false + } + } + return true +} +func (this *FetchTokenResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FetchTokenResponse) + if !ok { + that2, ok := that.(FetchTokenResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Token != that1.Token { + return false + } + if this.ExpiresIn != that1.ExpiresIn { + return false + } + if this.IssuedAt != that1.IssuedAt { + return false + } + return true +} +func (this *GetTokenAuthorityRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetTokenAuthorityRequest) + if !ok { + that2, ok := 
that.(GetTokenAuthorityRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Host != that1.Host { + return false + } + if !bytes.Equal(this.Salt, that1.Salt) { + return false + } + return true +} +func (this *GetTokenAuthorityResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GetTokenAuthorityResponse) + if !ok { + that2, ok := that.(GetTokenAuthorityResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.PublicKey, that1.PublicKey) { + return false + } + return true +} +func (this *VerifyTokenAuthorityRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VerifyTokenAuthorityRequest) + if !ok { + that2, ok := that.(VerifyTokenAuthorityRequest) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Host != that1.Host { + return false + } + if !bytes.Equal(this.Payload, that1.Payload) { + return false + } + if !bytes.Equal(this.Salt, that1.Salt) { + return false + } + return true +} +func (this *VerifyTokenAuthorityResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*VerifyTokenAuthorityResponse) + if !ok { + that2, ok := that.(VerifyTokenAuthorityResponse) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Signed, that1.Signed) { + return false + } + return true +} +func (this *CredentialsRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&auth.CredentialsRequest{") + s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CredentialsResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&auth.CredentialsResponse{") + s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n") + s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FetchTokenRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&auth.FetchTokenRequest{") + s = append(s, "ClientID: "+fmt.Sprintf("%#v", this.ClientID)+",\n") + s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") + s = append(s, "Realm: "+fmt.Sprintf("%#v", this.Realm)+",\n") + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + s = append(s, "Scopes: "+fmt.Sprintf("%#v", this.Scopes)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FetchTokenResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&auth.FetchTokenResponse{") + s = append(s, "Token: "+fmt.Sprintf("%#v", this.Token)+",\n") + s = append(s, "ExpiresIn: "+fmt.Sprintf("%#v", this.ExpiresIn)+",\n") + s = append(s, "IssuedAt: "+fmt.Sprintf("%#v", this.IssuedAt)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GetTokenAuthorityRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, 
"&auth.GetTokenAuthorityRequest{") + s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") + s = append(s, "Salt: "+fmt.Sprintf("%#v", this.Salt)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GetTokenAuthorityResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&auth.GetTokenAuthorityResponse{") + s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *VerifyTokenAuthorityRequest) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&auth.VerifyTokenAuthorityRequest{") + s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") + s = append(s, "Payload: "+fmt.Sprintf("%#v", this.Payload)+",\n") + s = append(s, "Salt: "+fmt.Sprintf("%#v", this.Salt)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *VerifyTokenAuthorityResponse) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&auth.VerifyTokenAuthorityResponse{") + s = append(s, "Signed: "+fmt.Sprintf("%#v", this.Signed)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAuth(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } // Reference imports to suppress errors if they are not otherwise used. @@ -242,6 +842,9 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type AuthClient interface { Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) + FetchToken(ctx context.Context, in *FetchTokenRequest, opts ...grpc.CallOption) (*FetchTokenResponse, error) + GetTokenAuthority(ctx context.Context, in *GetTokenAuthorityRequest, opts ...grpc.CallOption) (*GetTokenAuthorityResponse, error) + VerifyTokenAuthority(ctx context.Context, in *VerifyTokenAuthorityRequest, opts ...grpc.CallOption) (*VerifyTokenAuthorityResponse, error) } type authClient struct { @@ -261,9 +864,39 @@ func (c *authClient) Credentials(ctx context.Context, in *CredentialsRequest, op return out, nil } +func (c *authClient) FetchToken(ctx context.Context, in *FetchTokenRequest, opts ...grpc.CallOption) (*FetchTokenResponse, error) { + out := new(FetchTokenResponse) + err := c.cc.Invoke(ctx, "/moby.filesync.v1.Auth/FetchToken", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) GetTokenAuthority(ctx context.Context, in *GetTokenAuthorityRequest, opts ...grpc.CallOption) (*GetTokenAuthorityResponse, error) { + out := new(GetTokenAuthorityResponse) + err := c.cc.Invoke(ctx, "/moby.filesync.v1.Auth/GetTokenAuthority", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *authClient) VerifyTokenAuthority(ctx context.Context, in *VerifyTokenAuthorityRequest, opts ...grpc.CallOption) (*VerifyTokenAuthorityResponse, error) { + out := new(VerifyTokenAuthorityResponse) + err := c.cc.Invoke(ctx, "/moby.filesync.v1.Auth/VerifyTokenAuthority", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // AuthServer is the server API for Auth service. 
type AuthServer interface { Credentials(context.Context, *CredentialsRequest) (*CredentialsResponse, error) + FetchToken(context.Context, *FetchTokenRequest) (*FetchTokenResponse, error) + GetTokenAuthority(context.Context, *GetTokenAuthorityRequest) (*GetTokenAuthorityResponse, error) + VerifyTokenAuthority(context.Context, *VerifyTokenAuthorityRequest) (*VerifyTokenAuthorityResponse, error) } // UnimplementedAuthServer can be embedded to have forward compatible implementations. @@ -273,6 +906,15 @@ type UnimplementedAuthServer struct { func (*UnimplementedAuthServer) Credentials(ctx context.Context, req *CredentialsRequest) (*CredentialsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Credentials not implemented") } +func (*UnimplementedAuthServer) FetchToken(ctx context.Context, req *FetchTokenRequest) (*FetchTokenResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FetchToken not implemented") +} +func (*UnimplementedAuthServer) GetTokenAuthority(ctx context.Context, req *GetTokenAuthorityRequest) (*GetTokenAuthorityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTokenAuthority not implemented") +} +func (*UnimplementedAuthServer) VerifyTokenAuthority(ctx context.Context, req *VerifyTokenAuthorityRequest) (*VerifyTokenAuthorityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyTokenAuthority not implemented") +} func RegisterAuthServer(s *grpc.Server, srv AuthServer) { s.RegisterService(&_Auth_serviceDesc, srv) @@ -296,6 +938,60 @@ func _Auth_Credentials_Handler(srv interface{}, ctx context.Context, dec func(in return interceptor(ctx, in, info, handler) } +func _Auth_FetchToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FetchTokenRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).FetchToken(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.filesync.v1.Auth/FetchToken", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).FetchToken(ctx, req.(*FetchTokenRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_GetTokenAuthority_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTokenAuthorityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).GetTokenAuthority(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.filesync.v1.Auth/GetTokenAuthority", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AuthServer).GetTokenAuthority(ctx, req.(*GetTokenAuthorityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Auth_VerifyTokenAuthority_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyTokenAuthorityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AuthServer).VerifyTokenAuthority(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.filesync.v1.Auth/VerifyTokenAuthority", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(AuthServer).VerifyTokenAuthority(ctx, req.(*VerifyTokenAuthorityRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Auth_serviceDesc = grpc.ServiceDesc{ ServiceName: "moby.filesync.v1.Auth", HandlerType: (*AuthServer)(nil), @@ -304,6 +1000,18 @@ var _Auth_serviceDesc = grpc.ServiceDesc{ MethodName: "Credentials", Handler: _Auth_Credentials_Handler, }, + { + MethodName: "FetchToken", + Handler: _Auth_FetchToken_Handler, + }, + { + MethodName: "GetTokenAuthority", + Handler: _Auth_GetTokenAuthority_Handler, + }, + { + MethodName: "VerifyTokenAuthority", + Handler: _Auth_VerifyTokenAuthority_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "auth.proto", @@ -376,13 +1084,254 @@ func (m *CredentialsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { - offset -= sovAuth(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *FetchTokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FetchTokenRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FetchTokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0x22 + } + if len(m.Realm) > 0 { + i -= len(m.Realm) + copy(dAtA[i:], m.Realm) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Realm))) + i-- + dAtA[i] = 0x1a + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + } + if len(m.ClientID) > 0 { + i -= len(m.ClientID) + copy(dAtA[i:], m.ClientID) + i = encodeVarintAuth(dAtA, i, uint64(len(m.ClientID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FetchTokenResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FetchTokenResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FetchTokenResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IssuedAt != 0 { + i = encodeVarintAuth(dAtA, i, uint64(m.IssuedAt)) + i-- + dAtA[i] = 0x18 + } + if m.ExpiresIn != 0 { + i = encodeVarintAuth(dAtA, i, uint64(m.ExpiresIn)) + i-- + dAtA[i] = 0x10 + } + if len(m.Token) > 0 { + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetTokenAuthorityRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTokenAuthorityRequest) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTokenAuthorityRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Salt) > 0 { + i -= len(m.Salt) + copy(dAtA[i:], m.Salt) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Salt))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetTokenAuthorityResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTokenAuthorityResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetTokenAuthorityResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PublicKey) > 0 { + i -= len(m.PublicKey) + copy(dAtA[i:], m.PublicKey) + i = encodeVarintAuth(dAtA, i, uint64(len(m.PublicKey))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VerifyTokenAuthorityRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VerifyTokenAuthorityRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VerifyTokenAuthorityRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Salt) > 0 { + i -= len(m.Salt) + copy(dAtA[i:], m.Salt) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Salt))) + i-- + dAtA[i] = 0x1a + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VerifyTokenAuthorityResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VerifyTokenAuthorityResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VerifyTokenAuthorityResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signed) > 0 { + i -= len(m.Signed) + copy(dAtA[i:], m.Signed) + i = encodeVarintAuth(dAtA, i, uint64(len(m.Signed))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { + offset -= sovAuth(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } dAtA[offset] = uint8(v) return base @@ -417,42 +1366,763 @@ func (m *CredentialsResponse) Size() (n int) { return n } -func sovAuth(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +func (m *FetchTokenRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientID) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Host) + if l > 0 { + n += 1 + l + 
sovAuth(uint64(l)) + } + l = len(m.Realm) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Service) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovAuth(uint64(l)) + } + } + return n } -func sozAuth(x uint64) (n int) { - return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + +func (m *FetchTokenResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Token) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + if m.ExpiresIn != 0 { + n += 1 + sovAuth(uint64(m.ExpiresIn)) + } + if m.IssuedAt != 0 { + n += 1 + sovAuth(uint64(m.IssuedAt)) + } + return n } -func (this *CredentialsRequest) String() string { - if this == nil { - return "nil" + +func (m *GetTokenAuthorityRequest) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&CredentialsRequest{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Salt) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n } -func (this *CredentialsResponse) String() string { - if this == nil { - return "nil" + +func (m *GetTokenAuthorityResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PublicKey) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func (m *VerifyTokenAuthorityRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + l = len(m.Salt) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func (m *VerifyTokenAuthorityResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Signed) + if l > 0 { + n += 1 + l + sovAuth(uint64(l)) + } + return n +} + +func sovAuth(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAuth(x uint64) (n int) { + return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CredentialsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CredentialsRequest{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *CredentialsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CredentialsResponse{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *FetchTokenRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FetchTokenRequest{`, + `ClientID:` + fmt.Sprintf("%v", this.ClientID) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Realm:` + fmt.Sprintf("%v", this.Realm) + `,`, + `Service:` + fmt.Sprintf("%v", this.Service) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `}`, + }, "") + return s +} +func (this *FetchTokenResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FetchTokenResponse{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`, + `IssuedAt:` + fmt.Sprintf("%v", this.IssuedAt) + `,`, + `}`, + }, "") + return s +} +func (this *GetTokenAuthorityRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&GetTokenAuthorityRequest{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Salt:` + fmt.Sprintf("%v", this.Salt) + `,`, + `}`, + }, "") + return s +} +func (this *GetTokenAuthorityResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTokenAuthorityResponse{`, + `PublicKey:` + fmt.Sprintf("%v", this.PublicKey) + `,`, + `}`, + }, "") + return s +} +func (this *VerifyTokenAuthorityRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VerifyTokenAuthorityRequest{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Payload:` + fmt.Sprintf("%v", this.Payload) + `,`, + `Salt:` + fmt.Sprintf("%v", this.Salt) + `,`, + `}`, + }, "") + return s +} +func (this *VerifyTokenAuthorityResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VerifyTokenAuthorityResponse{`, + `Signed:` + fmt.Sprintf("%v", this.Signed) + `,`, + `}`, + }, "") + return s +} +func valueToStringAuth(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CredentialsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CredentialsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CredentialsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CredentialsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CredentialsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
CredentialsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FetchTokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FetchTokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FetchTokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Realm", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Realm = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FetchTokenResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FetchTokenResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FetchTokenResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType) + } + m.ExpiresIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpiresIn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuedAt", wireType) + } + m.IssuedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IssuedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - s := strings.Join([]string{`&CredentialsResponse{`, - `Username:` + fmt.Sprintf("%v", this.Username) + `,`, - `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, - `}`, - }, "") - return s -} -func valueToStringAuth(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *CredentialsRequest) Unmarshal(dAtA []byte) error { +func (m *GetTokenAuthorityRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -475,10 +2145,10 @@ func (m *CredentialsRequest) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CredentialsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetTokenAuthorityRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CredentialsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTokenAuthorityRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -513,6 +2183,40 @@ func (m *CredentialsRequest) Unmarshal(dAtA []byte) error { } m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Salt", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Salt = append(m.Salt[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Salt == nil { + m.Salt = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipAuth(dAtA[iNdEx:]) @@ -537,7 +2241,7 @@ func (m *CredentialsRequest) Unmarshal(dAtA []byte) error { } return nil } -func (m *CredentialsResponse) Unmarshal(dAtA []byte) error { +func (m *GetTokenAuthorityResponse) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -560,15 +2264,102 @@ func (m *CredentialsResponse) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CredentialsResponse: wiretype end group for non-group") + return fmt.Errorf("proto: GetTokenAuthorityResponse: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CredentialsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetTokenAuthorityResponse: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PublicKey = append(m.PublicKey[:0], dAtA[iNdEx:postIndex]...) + if m.PublicKey == nil { + m.PublicKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VerifyTokenAuthorityRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VerifyTokenAuthorityRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VerifyTokenAuthorityRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -596,13 +2387,13 @@ func (m *CredentialsResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Username = string(dAtA[iNdEx:postIndex]) + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowAuth @@ 
-612,23 +2403,146 @@ func (m *CredentialsResponse) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthAuth } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthAuth } if postIndex > l { return io.ErrUnexpectedEOF } - m.Secret = string(dAtA[iNdEx:postIndex]) + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) + if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Salt", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Salt = append(m.Salt[:0], dAtA[iNdEx:postIndex]...) + if m.Salt == nil { + m.Salt = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAuth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VerifyTokenAuthorityResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VerifyTokenAuthorityResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VerifyTokenAuthorityResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signed", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAuth + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAuth + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signed = append(m.Signed[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Signed == nil { + m.Signed = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.proto b/vendor/github.com/moby/buildkit/session/auth/auth.proto index 593312747950bab1faed694ab1d41d0a9b082407..139b0d0e3903dd14775854c6a3d9cdbe75a7de36 100644 --- a/vendor/github.com/moby/buildkit/session/auth/auth.proto +++ b/vendor/github.com/moby/buildkit/session/auth/auth.proto @@ -6,9 +6,11 @@ option go_package = "auth"; service Auth{ rpc Credentials(CredentialsRequest) returns (CredentialsResponse); + rpc FetchToken(FetchTokenRequest) returns (FetchTokenResponse); + rpc GetTokenAuthority(GetTokenAuthorityRequest) returns (GetTokenAuthorityResponse); + rpc VerifyTokenAuthority(VerifyTokenAuthorityRequest) returns (VerifyTokenAuthorityResponse); } - message CredentialsRequest { string Host = 1; } @@ -17,3 +19,36 @@ message CredentialsResponse { string Username = 1; string Secret = 2; } + +message FetchTokenRequest { + string ClientID = 1; + string Host = 2; + string Realm = 3; + string Service = 4; + repeated string Scopes = 5; +} + +message FetchTokenResponse { + string Token = 1; + int64 ExpiresIn = 2; // seconds + int64 IssuedAt = 3; // timestamp +} + +message GetTokenAuthorityRequest { + string Host = 1; + bytes Salt = 2; +} + +message GetTokenAuthorityResponse { + bytes PublicKey = 1; +} + +message VerifyTokenAuthorityRequest { + string Host = 1; + bytes Payload = 2; + bytes Salt = 3; +} + +message VerifyTokenAuthorityResponse { + bytes Signed = 1; +} diff --git a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go index ac255e4e1a12c20769090fb960577b4ac0fadbe3..c3104dbfa0d0ea0452b419afb101fdd28836e880 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go +++ b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go @@ -2,6 +2,7 @@ package filesync import ( "bufio" + "context" io "io" "os" "time" @@ -13,7 +14,13 @@ import ( "google.golang.org/grpc" ) -func sendDiffCopy(stream grpc.Stream, fs fsutil.FS, progress progressCb) error { +type Stream interface { + Context() context.Context + SendMsg(m interface{}) error + RecvMsg(m interface{}) error +} + +func sendDiffCopy(stream Stream, fs fsutil.FS, progress progressCb) error { return errors.WithStack(fsutil.Send(stream.Context(), stream, fs, progress)) } @@ -63,7 +70,7 @@ func (wc *streamWriterCloser) Close() error { return nil } -func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) error { +func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) error { st := time.Now() defer func() { logrus.Debugf("diffcopy took: %v", time.Since(st)) @@ -83,7 +90,7 @@ func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progres })) } -func syncTargetDiffCopy(ds grpc.Stream, dest string) error { +func syncTargetDiffCopy(ds grpc.ServerStream, dest string) error { if err := os.MkdirAll(dest, 0700); err != nil { return errors.Wrapf(err, "failed to create synctarget dest dir %s", dest) } @@ -101,7 +108,7 @@ func syncTargetDiffCopy(ds grpc.Stream, dest string) error { })) } -func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error { +func writeTargetFile(ds grpc.ServerStream, wc io.WriteCloser) error { for { bm := BytesMessage{} if err := ds.RecvMsg(&bm); err != nil { diff --git 
a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go index 51dd3c5383b6b19836051b48908be1ae5fc8c216..af62d1c2c6f086fccece325555e0e1a2598dcc90 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.go +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.go @@ -129,8 +129,8 @@ type progressCb func(int, bool) type protocol struct { name string - sendFn func(stream grpc.Stream, fs fsutil.FS, progress progressCb) error - recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb, mapFunc func(string, *fstypes.Stat) bool) error + sendFn func(stream Stream, fs fsutil.FS, progress progressCb) error + recvFn func(stream grpc.ClientStream, destDir string, cu CacheUpdater, progress progressCb, mapFunc func(string, *fstypes.Stat) bool) error } func isProtoSupported(p string) bool { diff --git a/vendor/github.com/moby/buildkit/session/group.go b/vendor/github.com/moby/buildkit/session/group.go index 88409bf8be3642779f12e7d2c542ae276b3d7436..4b9ba221f5feaf693c66c01cc0493af6e5b5a374 100644 --- a/vendor/github.com/moby/buildkit/session/group.go +++ b/vendor/github.com/moby/buildkit/session/group.go @@ -74,7 +74,7 @@ func (sm *Manager) Any(ctx context.Context, g Group, f func(context.Context, str timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - c, err := sm.Get(timeoutCtx, id) + c, err := sm.Get(timeoutCtx, id, false) if err != nil { lastErr = err continue diff --git a/vendor/github.com/moby/buildkit/session/grpc.go b/vendor/github.com/moby/buildkit/session/grpc.go index 02870ca16d099befbf6cf3d5e4c7f240848544de..728ea2e17c30d15215e90ed9ff057125f3fff592 100644 --- a/vendor/github.com/moby/buildkit/session/grpc.go +++ b/vendor/github.com/moby/buildkit/session/grpc.go @@ -31,7 +31,7 @@ func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc. var stream []grpc.StreamClientInterceptor var dialCount int64 - dialer := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) { + dialer := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { if c := atomic.AddInt64(&dialCount, 1); c > 1 { return nil, errors.Errorf("only one connection allowed") } @@ -64,7 +64,7 @@ func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc. dialOpts = append(dialOpts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...))) } - cc, err := grpc.DialContext(ctx, "", dialOpts...) + cc, err := grpc.DialContext(ctx, "localhost", dialOpts...) 
if err != nil { return nil, nil, errors.Wrap(err, "failed to create grpc client") } diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go index 2486563e333f4e561a0def7e9e76386fef5b5ea7..5f0cf3d77a398372d68712e50f6d13ffc7f6f07f 100644 --- a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go +++ b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go @@ -33,21 +33,26 @@ func Dialer(api controlapi.ControlClient) session.Dialer { } } -func streamToConn(stream grpc.Stream) (net.Conn, <-chan struct{}) { +type stream interface { + Context() context.Context + SendMsg(m interface{}) error + RecvMsg(m interface{}) error +} + +func streamToConn(stream stream) (net.Conn, <-chan struct{}) { closeCh := make(chan struct{}) c := &conn{stream: stream, buf: make([]byte, 32*1<<10), closeCh: closeCh} return c, closeCh } type conn struct { - stream grpc.Stream + stream stream buf []byte lastBuf []byte closedOnce sync.Once readMu sync.Mutex writeMu sync.Mutex - err error closeCh chan struct{} } diff --git a/vendor/github.com/moby/buildkit/session/manager.go b/vendor/github.com/moby/buildkit/session/manager.go index e01b047e231239fcd08d8c312feb5a0a7e77a8dc..edac93063c38fbd9b4be0b2261d2512a13f14fa0 100644 --- a/vendor/github.com/moby/buildkit/session/manager.go +++ b/vendor/github.com/moby/buildkit/session/manager.go @@ -149,7 +149,7 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin } // Get returns a session by ID -func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) { +func (sm *Manager) Get(ctx context.Context, id string, noWait bool) (Caller, error) { // session prefix is used to identify vertexes with different contexts so // they would not collide, but for lookup we don't need the prefix if p := strings.SplitN(id, ":", 2); len(p) == 2 && len(p[1]) > 0 { @@ -180,7 +180,7 @@ func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) { } var ok bool c, ok = sm.sessions[id] - if !ok || c.closed() { + if (!ok || c.closed()) && !noWait { sm.updateCondition.Wait() continue } @@ -188,6 +188,10 @@ func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) { break } + if c == nil { + return nil, nil + } + return c, nil } diff --git a/vendor/github.com/moby/buildkit/session/sshforward/copy.go b/vendor/github.com/moby/buildkit/session/sshforward/copy.go index 85366f19aff5b533b137904dc6f9e99330fac279..6db414894923f1f47c34be1fef6327476760bae1 100644 --- a/vendor/github.com/moby/buildkit/session/sshforward/copy.go +++ b/vendor/github.com/moby/buildkit/session/sshforward/copy.go @@ -6,10 +6,14 @@ import ( "github.com/pkg/errors" context "golang.org/x/net/context" "golang.org/x/sync/errgroup" - "google.golang.org/grpc" ) -func Copy(ctx context.Context, conn io.ReadWriteCloser, stream grpc.Stream, closeStream func() error) error { +type Stream interface { + SendMsg(m interface{}) error + RecvMsg(m interface{}) error +} + +func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStream func() error) error { g, ctx := errgroup.WithContext(ctx) g.Go(func() (retErr error) { diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go index a5cd1a2575ce77454da5820bd07acd86e26a18e4..df2e99b6c180780915dafaf96a689976b1c160e9 100644 --- a/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go +++ 
b/vendor/github.com/moby/buildkit/snapshot/localmounter_windows.go @@ -25,12 +25,27 @@ func (lm *localMounter) Mount() (string, error) { return "", errors.Wrapf(errdefs.ErrNotImplemented, "request to mount %d layers, only 1 is supported", len(lm.mounts)) } + m := lm.mounts[0] + + if m.Type == "bind" || m.Type == "rbind" { + ro := false + for _, opt := range m.Options { + if opt == "ro" { + ro = true + break + } + } + if !ro { + return m.Source, nil + } + } + // Windows mounts always activate in-place, so the target of the mount must be the source directory. // See https://github.com/containerd/containerd/pull/2366 - dir := lm.mounts[0].Source + dir := m.Source - if err := lm.mounts[0].Mount(dir); err != nil { - return "", errors.Wrapf(err, "failed to mount in-place: %v", lm.mounts[0]) + if err := m.Mount(dir); err != nil { + return "", errors.Wrapf(err, "failed to mount in-place: %v", m) } lm.target = dir return lm.target, nil diff --git a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go b/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go index 19755816cd5ae7e6f1b1e2733510284b47330984..515feffbf0d264ad37c2965b8d8bd96f35bde88f 100644 --- a/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go +++ b/vendor/github.com/moby/buildkit/solver/bboltcachestorage/storage.go @@ -233,12 +233,6 @@ func (s *Store) releaseHelper(tx *bolt.Tx, id, resultID string) error { } } - links := tx.Bucket([]byte(resultBucket)) - if results == nil { - return nil - } - links = links.Bucket([]byte(id)) - return s.emptyBranchWithParents(tx, []byte(id)) } diff --git a/vendor/github.com/moby/buildkit/solver/cacheopts.go b/vendor/github.com/moby/buildkit/solver/cacheopts.go new file mode 100644 index 0000000000000000000000000000000000000000..7c58d82112cb0638805a54e20204e905f0c3a51a --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/cacheopts.go @@ -0,0 +1,88 @@ +package solver + +import ( + "context" + + digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +type CacheOpts map[interface{}]interface{} + +type cacheOptGetterKey struct{} + +func CacheOptGetterOf(ctx context.Context) func(keys ...interface{}) map[interface{}]interface{} { + if v := ctx.Value(cacheOptGetterKey{}); v != nil { + if getter, ok := v.(func(keys ...interface{}) map[interface{}]interface{}); ok { + return getter + } + } + return nil +} + +func withAncestorCacheOpts(ctx context.Context, start *state) context.Context { + return context.WithValue(ctx, cacheOptGetterKey{}, func(keys ...interface{}) map[interface{}]interface{} { + keySet := make(map[interface{}]struct{}) + for _, k := range keys { + keySet[k] = struct{}{} + } + values := make(map[interface{}]interface{}) + walkAncestors(start, func(st *state) bool { + if st.clientVertex.Error != "" { + // don't use values from cancelled or otherwise error'd vertexes + return false + } + for _, res := range st.op.cacheRes { + if res.Opts == nil { + continue + } + for k := range keySet { + if v, ok := res.Opts[k]; ok { + values[k] = v + delete(keySet, k) + if len(keySet) == 0 { + return true + } + } + } + } + return false + }) + return values + }) +} + +func walkAncestors(start *state, f func(*state) bool) { + stack := [][]*state{{start}} + cache := make(map[digest.Digest]struct{}) + for len(stack) > 0 { + sts := stack[len(stack)-1] + if len(sts) == 0 { + stack = stack[:len(stack)-1] + continue + } + st := sts[len(sts)-1] + stack[len(stack)-1] = sts[:len(sts)-1] + if st == nil { + continue + } + if _, ok := 
cache[st.origDigest]; ok { + continue + } + cache[st.origDigest] = struct{}{} + if shouldStop := f(st); shouldStop { + return + } + stack = append(stack, []*state{}) + for _, parentDgst := range st.clientVertex.Inputs { + st.solver.mu.RLock() + parent := st.solver.actives[parentDgst] + st.solver.mu.RUnlock() + if parent == nil { + logrus.Warnf("parent %q not found in active job list during cache opt search", parentDgst) + continue + } + stack[len(stack)-1] = append(stack[len(stack)-1], parent) + } + } +} diff --git a/vendor/github.com/moby/buildkit/solver/cachestorage.go b/vendor/github.com/moby/buildkit/solver/cachestorage.go index cc1e21134638b4abeeed46a97b90c14336094a72..12797223c6801ab101b1e7e0235d33b29f2ebf4b 100644 --- a/vendor/github.com/moby/buildkit/solver/cachestorage.go +++ b/vendor/github.com/moby/buildkit/solver/cachestorage.go @@ -4,6 +4,7 @@ import ( "context" "time" + "github.com/moby/buildkit/session" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -46,6 +47,6 @@ type CacheInfoLink struct { type CacheResultStorage interface { Save(Result, time.Time) (CacheResult, error) Load(ctx context.Context, res CacheResult) (Result, error) - LoadRemote(ctx context.Context, res CacheResult) (*Remote, error) + LoadRemote(ctx context.Context, res CacheResult, s session.Group) (*Remote, error) Exists(id string) bool } diff --git a/vendor/github.com/moby/buildkit/solver/edge.go b/vendor/github.com/moby/buildkit/solver/edge.go index 52613adbc54208204a09c37a1c40d9c726a4c942..6ebf3a74db5ba43de330831e795995c9c46f6707 100644 --- a/vendor/github.com/moby/buildkit/solver/edge.go +++ b/vendor/github.com/moby/buildkit/solver/edge.go @@ -76,8 +76,6 @@ type dep struct { edgeState index Index keyMap map[string]*CacheKey - desiredState edgeStatusType - e *edge slowCacheReq pipe.Receiver slowCacheComplete bool slowCacheFoundKey bool @@ -119,7 +117,7 @@ type edgeRequest struct { // incrementReferenceCount increases the number of times release needs to be // called to release the edge. Called on merging edges. func (e *edge) incrementReferenceCount() { - e.releaserCount += 1 + e.releaserCount++ } // release releases the edge resources @@ -225,6 +223,14 @@ func (e *edge) slowCacheFunc(dep *dep) ResultBasedCacheFunc { return e.cacheMap.Deps[int(dep.index)].ComputeDigestFunc } +// preprocessFunc returns result based cache func +func (e *edge) preprocessFunc(dep *dep) PreprocessFunc { + if e.cacheMap == nil { + return nil + } + return e.cacheMap.Deps[int(dep.index)].PreprocessFunc +} + // allDepsHaveKeys checks if all dependencies have at least one key. 
used for // determining if there is enough data for combining cache key for edge func (e *edge) allDepsHaveKeys(matching bool) bool { @@ -489,17 +495,20 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { e.err = upt.Status().Err } } else if !dep.slowCacheComplete { - k := NewCacheKey(upt.Status().Value.(digest.Digest), -1) - dep.slowCacheKey = &ExportableCacheKey{CacheKey: k, Exporter: &exporter{k: k}} - slowKeyExp := CacheKeyWithSelector{CacheKey: *dep.slowCacheKey} - defKeys := make([]CacheKeyWithSelector, 0, len(dep.result.CacheKeys())) - for _, dk := range dep.result.CacheKeys() { - defKeys = append(defKeys, CacheKeyWithSelector{CacheKey: dk, Selector: e.cacheMap.Deps[i].Selector}) - } - dep.slowCacheFoundKey = e.probeCache(dep, []CacheKeyWithSelector{slowKeyExp}) + dgst := upt.Status().Value.(digest.Digest) + if e.cacheMap.Deps[int(dep.index)].ComputeDigestFunc != nil && dgst != "" { + k := NewCacheKey(dgst, -1) + dep.slowCacheKey = &ExportableCacheKey{CacheKey: k, Exporter: &exporter{k: k}} + slowKeyExp := CacheKeyWithSelector{CacheKey: *dep.slowCacheKey} + defKeys := make([]CacheKeyWithSelector, 0, len(dep.result.CacheKeys())) + for _, dk := range dep.result.CacheKeys() { + defKeys = append(defKeys, CacheKeyWithSelector{CacheKey: dk, Selector: e.cacheMap.Deps[i].Selector}) + } + dep.slowCacheFoundKey = e.probeCache(dep, []CacheKeyWithSelector{slowKeyExp}) - // connect def key to slow key - e.op.Cache().Query(append(defKeys, slowKeyExp), dep.index, e.cacheMap.Digest, e.edge.Index) + // connect def key to slow key + e.op.Cache().Query(append(defKeys, slowKeyExp), dep.index, e.cacheMap.Digest, e.edge.Index) + } dep.slowCacheComplete = true e.keysDidChange = true @@ -585,7 +594,7 @@ func (e *edge) recalcCurrentState() { stHigh := edgeStatusCacheSlow // maximum possible state if e.cacheMap != nil { for _, dep := range e.deps { - isSlowIncomplete := e.slowCacheFunc(dep) != nil && (dep.state == edgeStatusCacheSlow || (dep.state == edgeStatusComplete && !dep.slowCacheComplete)) + isSlowIncomplete := (e.slowCacheFunc(dep) != nil || e.preprocessFunc(dep) != nil) && (dep.state == edgeStatusCacheSlow || (dep.state == edgeStatusComplete && !dep.slowCacheComplete)) if dep.state > stLow && len(dep.keyMap) == 0 && !isSlowIncomplete { stLow = dep.state @@ -806,15 +815,16 @@ func (e *edge) createInputRequests(desiredState edgeStatusType, f *pipeFactory, } } // initialize function to compute cache key based on dependency result - if dep.state == edgeStatusComplete && dep.slowCacheReq == nil && e.slowCacheFunc(dep) != nil && e.cacheMap != nil { + if dep.state == edgeStatusComplete && dep.slowCacheReq == nil && (e.slowCacheFunc(dep) != nil || e.preprocessFunc(dep) != nil) && e.cacheMap != nil { + pfn := e.preprocessFunc(dep) fn := e.slowCacheFunc(dep) res := dep.result - func(fn ResultBasedCacheFunc, res Result, index Index) { + func(pfn PreprocessFunc, fn ResultBasedCacheFunc, res Result, index Index) { dep.slowCacheReq = f.NewFuncRequest(func(ctx context.Context) (interface{}, error) { - v, err := e.op.CalcSlowCache(ctx, index, fn, res) + v, err := e.op.CalcSlowCache(ctx, index, pfn, fn, res) return v, errors.Wrap(err, "failed to compute cache key") }) - }(fn, res, dep.index) + }(pfn, fn, res, dep.index) addedNew = true } } @@ -867,6 +877,7 @@ func (e *edge) loadCache(ctx context.Context) (interface{}, error) { logrus.Debugf("load cache for %s with %s", e.edge.Vertex.Name(), rec.ID) res, err := e.op.LoadCache(ctx, rec) if err != nil { + logrus.Debugf("load cache for %s err: %v", 
e.edge.Vertex.Name(), err) return nil, errors.Wrap(err, "failed to load cache") } diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go b/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go index f179e9958a20b7ecf3f920ee7761af89dc01229f..4375ebde3041a37746234403fda3b5f06f43cee2 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go @@ -105,25 +105,105 @@ func (m *Source) GetRanges() []*pb.Range { return nil } +type FrontendCap struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FrontendCap) Reset() { *m = FrontendCap{} } +func (m *FrontendCap) String() string { return proto.CompactTextString(m) } +func (*FrontendCap) ProtoMessage() {} +func (*FrontendCap) Descriptor() ([]byte, []int) { + return fileDescriptor_689dc58a5060aff5, []int{2} +} +func (m *FrontendCap) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FrontendCap.Unmarshal(m, b) +} +func (m *FrontendCap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FrontendCap.Marshal(b, m, deterministic) +} +func (m *FrontendCap) XXX_Merge(src proto.Message) { + xxx_messageInfo_FrontendCap.Merge(m, src) +} +func (m *FrontendCap) XXX_Size() int { + return xxx_messageInfo_FrontendCap.Size(m) +} +func (m *FrontendCap) XXX_DiscardUnknown() { + xxx_messageInfo_FrontendCap.DiscardUnknown(m) +} + +var xxx_messageInfo_FrontendCap proto.InternalMessageInfo + +func (m *FrontendCap) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +type Subrequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Subrequest) Reset() { *m = Subrequest{} } +func (m *Subrequest) String() string { return proto.CompactTextString(m) } +func (*Subrequest) ProtoMessage() {} +func (*Subrequest) Descriptor() ([]byte, []int) { + return fileDescriptor_689dc58a5060aff5, []int{3} +} +func (m *Subrequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Subrequest.Unmarshal(m, b) +} +func (m *Subrequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Subrequest.Marshal(b, m, deterministic) +} +func (m *Subrequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subrequest.Merge(m, src) +} +func (m *Subrequest) XXX_Size() int { + return xxx_messageInfo_Subrequest.Size(m) +} +func (m *Subrequest) XXX_DiscardUnknown() { + xxx_messageInfo_Subrequest.DiscardUnknown(m) +} + +var xxx_messageInfo_Subrequest proto.InternalMessageInfo + +func (m *Subrequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func init() { proto.RegisterType((*Vertex)(nil), "errdefs.Vertex") proto.RegisterType((*Source)(nil), "errdefs.Source") + proto.RegisterType((*FrontendCap)(nil), "errdefs.FrontendCap") + proto.RegisterType((*Subrequest)(nil), "errdefs.Subrequest") } func init() { proto.RegisterFile("errdefs.proto", fileDescriptor_689dc58a5060aff5) } var fileDescriptor_689dc58a5060aff5 = []byte{ - // 177 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x2c, 0xcd, 0xc1, 0x8a, 0x83, 0x30, - 0x10, 0x80, 0x61, 0xdc, 0x5d, 0xb2, 0x18, 0xd9, 0x3d, 0xe4, 0x50, 0xa4, 0x27, 0xeb, 0xc9, 0x43, - 0x49, 
0xc0, 0x3e, 0x45, 0x4f, 0x85, 0x14, 0x7a, 0x6f, 0x74, 0xb4, 0xa1, 0xea, 0x84, 0x49, 0x2c, - 0xed, 0xdb, 0x17, 0x6d, 0x8e, 0xff, 0x7c, 0x33, 0x0c, 0xff, 0x03, 0xa2, 0x16, 0x3a, 0x2f, 0x1d, - 0x61, 0x40, 0xf1, 0x1b, 0x73, 0xbb, 0xef, 0x6d, 0xb8, 0xcd, 0x46, 0x36, 0x38, 0xaa, 0x11, 0xcd, - 0x4b, 0x99, 0xd9, 0x0e, 0xed, 0xdd, 0x06, 0xe5, 0x71, 0x78, 0x00, 0x29, 0x67, 0x14, 0xba, 0x78, - 0x56, 0x16, 0x9c, 0x5d, 0x80, 0x02, 0x3c, 0xc5, 0x86, 0xb3, 0xd6, 0xf6, 0xe0, 0x43, 0x9e, 0x14, - 0x49, 0x95, 0xea, 0x58, 0xe5, 0x89, 0xb3, 0x33, 0xce, 0xd4, 0x80, 0x28, 0xf9, 0x8f, 0x9d, 0x3a, - 0x5c, 0x3d, 0xab, 0xff, 0xa5, 0x33, 0xf2, 0x23, 0xc7, 0xa9, 0x43, 0xbd, 0x9a, 0xd8, 0x71, 0x46, - 0xd7, 0xa9, 0x07, 0x9f, 0x7f, 0x15, 0xdf, 0x55, 0x56, 0xa7, 0xcb, 0x96, 0x5e, 0x26, 0x3a, 0x82, - 0x61, 0xeb, 0xe7, 0xc3, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x93, 0xb5, 0x8b, 0x2a, 0xc1, 0x00, 0x00, - 0x00, + // 213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8e, 0xc1, 0x4a, 0x03, 0x31, + 0x10, 0x86, 0xa9, 0x96, 0x48, 0x67, 0xd1, 0x43, 0x0e, 0x52, 0x3c, 0x6d, 0x73, 0xea, 0x41, 0x36, + 0x50, 0x1f, 0x41, 0x10, 0x3c, 0x09, 0x5b, 0xf0, 0xbe, 0x69, 0x66, 0xd7, 0x60, 0x37, 0x13, 0x27, + 0x89, 0xe8, 0xdb, 0xcb, 0xc6, 0x1c, 0x7b, 0x9b, 0x7f, 0xbe, 0x6f, 0x98, 0x1f, 0x6e, 0x91, 0xd9, + 0xe2, 0x18, 0xbb, 0xc0, 0x94, 0x48, 0xde, 0xd4, 0xf8, 0xf0, 0x38, 0xb9, 0xf4, 0x91, 0x4d, 0x77, + 0xa2, 0x59, 0xcf, 0x64, 0x7e, 0xb5, 0xc9, 0xee, 0x6c, 0x3f, 0x5d, 0xd2, 0x91, 0xce, 0xdf, 0xc8, + 0x3a, 0x18, 0x4d, 0xa1, 0x9e, 0xa9, 0x16, 0xc4, 0x3b, 0x72, 0xc2, 0x1f, 0x79, 0x0f, 0xc2, 0xba, + 0x09, 0x63, 0xda, 0xae, 0xda, 0xd5, 0x7e, 0xd3, 0xd7, 0xa4, 0xde, 0x40, 0x1c, 0x29, 0xf3, 0x09, + 0xa5, 0x82, 0xb5, 0xf3, 0x23, 0x15, 0xde, 0x1c, 0xee, 0xba, 0x60, 0xba, 0x7f, 0xf2, 0xea, 0x47, + 0xea, 0x0b, 0x93, 0x3b, 0x10, 0x3c, 0xf8, 0x09, 0xe3, 0xf6, 0xaa, 0xbd, 0xde, 0x37, 0x87, 0xcd, + 0x62, 0xf5, 0xcb, 0xa6, 0xaf, 0x40, 0xed, 0xa0, 0x79, 0x61, 0xf2, 0x09, 0xbd, 0x7d, 0x1e, 0x82, + 0x94, 0xb0, 0xf6, 0xc3, 0x8c, 0xf5, 0x6b, 0x99, 0x55, 0x0b, 0x70, 0xcc, 0x86, 0xf1, 0x2b, 0x63, + 0x4c, 0x97, 0x0c, 0x23, 0x4a, 0xfd, 0xa7, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x75, 0x4b, 0xfe, + 0xad, 0x06, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.proto b/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.proto index 7e808cbbb713cf66e7312d4e9779d447f6b60c98..f41ed14bb55710243487078ff8f0005e93891c35 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.proto +++ b/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.proto @@ -12,3 +12,11 @@ message Source { pb.SourceInfo info = 1; repeated pb.Range ranges = 2; } + +message FrontendCap { + string name = 1; +} + +message Subrequest { + string name = 1; +} diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/exec.go b/vendor/github.com/moby/buildkit/solver/errdefs/exec.go new file mode 100644 index 0000000000000000000000000000000000000000..2c6bbf6e6b17ee4617a39bbf93da56d41c230048 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/errdefs/exec.go @@ -0,0 +1,24 @@ +package errdefs + +import fmt "fmt" + +// ExitError will be returned when the container process exits with a non-zero +// exit code. 
+type ExitError struct { + ExitCode uint32 + Err error +} + +func (err *ExitError) Error() string { + if err.Err != nil { + return err.Err.Error() + } + return fmt.Sprintf("exit code: %d", err.ExitCode) +} + +func (err *ExitError) Unwrap() error { + if err.Err == nil { + return fmt.Errorf("exit code: %d", err.ExitCode) + } + return err.Err +} diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go b/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go new file mode 100644 index 0000000000000000000000000000000000000000..e8af9ff2332826da36a5328ec5c1618871beaf91 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go @@ -0,0 +1,41 @@ +package errdefs + +import ( + fmt "fmt" + + "github.com/containerd/typeurl" + "github.com/moby/buildkit/util/grpcerrors" +) + +func init() { + typeurl.Register((*FrontendCap)(nil), "github.com/moby/buildkit", "errdefs.FrontendCap+json") +} + +type UnsupportedFrontendCapError struct { + FrontendCap + error +} + +func (e *UnsupportedFrontendCapError) Error() string { + msg := fmt.Sprintf("unsupported frontend capability %s", e.FrontendCap.Name) + if e.error != nil { + msg += ": " + e.error.Error() + } + return msg +} + +func (e *UnsupportedFrontendCapError) Unwrap() error { + return e.error +} + +func (e *UnsupportedFrontendCapError) ToProto() grpcerrors.TypedErrorProto { + return &e.FrontendCap +} + +func NewUnsupportedFrontendCapError(name string) error { + return &UnsupportedFrontendCapError{FrontendCap: FrontendCap{Name: name}} +} + +func (v *FrontendCap) WrapError(err error) error { + return &UnsupportedFrontendCapError{error: err, FrontendCap: *v} +} diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go b/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go new file mode 100644 index 0000000000000000000000000000000000000000..b30eab3f66981a76759e4ddcaac6a8b037c319d2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go @@ -0,0 +1,41 @@ +package errdefs + +import ( + fmt "fmt" + + "github.com/containerd/typeurl" + "github.com/moby/buildkit/util/grpcerrors" +) + +func init() { + typeurl.Register((*Subrequest)(nil), "github.com/moby/buildkit", "errdefs.Subrequest+json") +} + +type UnsupportedSubrequestError struct { + Subrequest + error +} + +func (e *UnsupportedSubrequestError) Error() string { + msg := fmt.Sprintf("unsupported request %s", e.Subrequest.Name) + if e.error != nil { + msg += ": " + e.error.Error() + } + return msg +} + +func (e *UnsupportedSubrequestError) Unwrap() error { + return e.error +} + +func (e *UnsupportedSubrequestError) ToProto() grpcerrors.TypedErrorProto { + return &e.Subrequest +} + +func NewUnsupportedSubrequestError(name string) error { + return &UnsupportedSubrequestError{Subrequest: Subrequest{Name: name}} +} + +func (v *Subrequest) WrapError(err error) error { + return &UnsupportedSubrequestError{error: err, Subrequest: *v} +} diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go b/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go index 71fdb6cab570a46f40de0d34e2ae9d06c6934a2b..4ec375165d2425b4d107a1d8b6e00d88514625a2 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/vertex.go @@ -1,14 +1,14 @@ package errdefs import ( - proto "github.com/golang/protobuf/proto" + "github.com/containerd/typeurl" "github.com/moby/buildkit/util/grpcerrors" digest "github.com/opencontainers/go-digest" ) func init() { - 
proto.RegisterType((*Vertex)(nil), "errdefs.Vertex") - proto.RegisterType((*Source)(nil), "errdefs.Source") + typeurl.Register((*Vertex)(nil), "github.com/moby/buildkit", "errdefs.Vertex+json") + typeurl.Register((*Source)(nil), "github.com/moby/buildkit", "errdefs.Source+json") } type VertexError struct { diff --git a/vendor/github.com/moby/buildkit/solver/exporter.go b/vendor/github.com/moby/buildkit/solver/exporter.go index 6a073960dfa4baf226a5909d798002c5f69344a5..26ca2fb9291aedacd3f51acc57f535e1b15fcbce 100644 --- a/vendor/github.com/moby/buildkit/solver/exporter.go +++ b/vendor/github.com/moby/buildkit/solver/exporter.go @@ -100,7 +100,7 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach return nil, err } - remote, err = cm.results.LoadRemote(ctx, res) + remote, err = cm.results.LoadRemote(ctx, res, opt.Session) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/solver/index.go b/vendor/github.com/moby/buildkit/solver/index.go index 78a2cca256e03e531b72b2cd76d6fd315ae35cdb..4a330669d1902b242c4a9fd3cec257785c06b234 100644 --- a/vendor/github.com/moby/buildkit/solver/index.go +++ b/vendor/github.com/moby/buildkit/solver/index.go @@ -121,14 +121,14 @@ func (ei *edgeIndex) LoadOrStore(k *CacheKey, e *edge) *edge { } // enforceLinked adds links from current ID to all dep keys -func (er *edgeIndex) enforceLinked(id string, k *CacheKey) { - main, ok := er.items[id] +func (ei *edgeIndex) enforceLinked(id string, k *CacheKey) { + main, ok := ei.items[id] if !ok { main = &indexItem{ links: map[CacheInfoLink]map[string]struct{}{}, deps: map[string]struct{}{}, } - er.items[id] = main + ei.items[id] = main } deps := k.Deps() @@ -136,10 +136,10 @@ func (er *edgeIndex) enforceLinked(id string, k *CacheKey) { for i, dd := range deps { for _, d := range dd { ck := d.CacheKey.CacheKey - er.enforceIndexID(ck) + ei.enforceIndexID(ck) ll := CacheInfoLink{Input: Index(i), Digest: k.Digest(), Output: k.Output(), Selector: d.Selector} for _, ckID := range ck.indexIDs { - if item, ok := er.items[ckID]; ok { + if item, ok := ei.items[ckID]; ok { links, ok := item.links[ll] if !ok { links = map[string]struct{}{} diff --git a/vendor/github.com/moby/buildkit/solver/jobs.go b/vendor/github.com/moby/buildkit/solver/jobs.go index 182a6d023e75cc57de0caa95d580ae1977d2f910..d34db5e7e1989557d46d9f44845de6ea50c57e06 100644 --- a/vendor/github.com/moby/buildkit/solver/jobs.go +++ b/vendor/github.com/moby/buildkit/solver/jobs.go @@ -230,6 +230,7 @@ type Job struct { pw progress.Writer span opentracing.Span values sync.Map + id string progressCloser func() SessionID string @@ -267,6 +268,17 @@ func (jl *Solver) setEdge(e Edge, newEdge *edge) { st.setEdge(e.Index, newEdge) } +func (jl *Solver) getState(e Edge) *state { + jl.mu.RLock() + defer jl.mu.RUnlock() + + st, ok := jl.actives[e.Vertex.Digest()] + if !ok { + return nil + } + return st +} + func (jl *Solver) getEdge(e Edge) *edge { jl.mu.RLock() defer jl.mu.RUnlock() @@ -429,6 +441,7 @@ func (jl *Solver) NewJob(id string) (*Job, error) { pw: pw, progressCloser: progressCloser, span: (&opentracing.NoopTracer{}).StartSpan(""), + id: id, } jl.jobs[id] = j @@ -513,6 +526,8 @@ func (j *Job) Discard() error { } st.mu.Unlock() } + + delete(j.list.jobs, j.id) return nil } @@ -543,7 +558,7 @@ type activeOp interface { Exec(ctx context.Context, inputs []Result) (outputs []Result, exporters []ExportableCacheKey, err error) IgnoreCache() bool Cache() CacheManager - CalcSlowCache(context.Context, Index, 
ResultBasedCacheFunc, Result) (digest.Digest, error) + CalcSlowCache(context.Context, Index, PreprocessFunc, ResultBasedCacheFunc, Result) (digest.Digest, error) } func newSharedOp(resolver ResolveOpFunc, cacheManager CacheManager, st *state) *sharedOp { @@ -596,20 +611,20 @@ func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) (Result, err // no cache hit. start evaluating the node span, ctx := tracing.StartSpan(ctx, "load cache: "+s.st.vtx.Name()) notifyStarted(ctx, &s.st.clientVertex, true) - res, err := s.Cache().Load(ctx, rec) + res, err := s.Cache().Load(withAncestorCacheOpts(ctx, s.st), rec) tracing.FinishWithError(span, err) notifyCompleted(ctx, &s.st.clientVertex, err, true) return res, err } -func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, f ResultBasedCacheFunc, res Result) (dgst digest.Digest, err error) { +func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessFunc, f ResultBasedCacheFunc, res Result) (dgst digest.Digest, err error) { defer func() { err = errdefs.WrapVertex(err, s.st.origDigest) }() key, err := s.g.Do(ctx, fmt.Sprintf("slow-compute-%d", index), func(ctx context.Context) (interface{}, error) { s.slowMu.Lock() // TODO: add helpers for these stored values - if res := s.slowCacheRes[index]; res != "" { + if res, ok := s.slowCacheRes[index]; ok { s.slowMu.Unlock() return res, nil } @@ -618,9 +633,23 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, f ResultBased return err, nil } s.slowMu.Unlock() - ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) - key, err := f(ctx, res) + complete := true + if p != nil { + st := s.st.solver.getState(s.st.vtx.Inputs()[index]) + ctx2 := opentracing.ContextWithSpan(progress.WithProgress(ctx, st.mpw), st.mspan) + err = p(ctx2, res, st) + if err != nil { + f = nil + ctx = ctx2 + } + } + + var key digest.Digest + if f != nil { + ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) + key, err = f(withAncestorCacheOpts(ctx, s.st), res, s.st) + } if err != nil { select { case <-ctx.Done(): @@ -666,6 +695,7 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp, return nil, s.cacheErr } ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) + ctx = withAncestorCacheOpts(ctx, s.st) if len(s.st.vtx.Inputs()) == 0 { // no cache hit. start evaluating the node span, ctx := tracing.StartSpan(ctx, "cache request: "+s.st.vtx.Name()) @@ -721,6 +751,7 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result, } ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan) + ctx = withAncestorCacheOpts(ctx, s.st) // no cache hit. 
start evaluating the node span, ctx := tracing.StartSpan(ctx, s.st.vtx.Name()) diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go index d08d0618a199ed7e53849bbca6c081fcd328b7fd..df3e4b7404090a618c51cba9590f521e5af9e9bf 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go @@ -9,7 +9,6 @@ import ( "github.com/containerd/containerd/platforms" "github.com/mitchellh/hashstructure" - "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/executor" @@ -131,7 +130,7 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid st if !ok { return nil, errors.Errorf("invalid frontend: %s", req.Frontend) } - res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs, sid) + res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs, sid, b.sm) if err != nil { return nil, errors.Wrapf(err, "failed to solve with frontend %s", req.Frontend) } @@ -245,8 +244,8 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err return nil, err } -func (s *llbBridge) Run(ctx context.Context, id string, root cache.Mountable, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { - w, err := s.resolveWorker() +func (b *llbBridge) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { + w, err := b.resolveWorker() if err != nil { return err } @@ -256,8 +255,8 @@ func (s *llbBridge) Run(ctx context.Context, id string, root cache.Mountable, mo return err } -func (s *llbBridge) Exec(ctx context.Context, id string, process executor.ProcessInfo) (err error) { - w, err := s.resolveWorker() +func (b *llbBridge) Exec(ctx context.Context, id string, process executor.ProcessInfo) (err error) { + w, err := b.resolveWorker() if err != nil { return err } @@ -267,8 +266,8 @@ func (s *llbBridge) Exec(ctx context.Context, id string, process executor.Proces return err } -func (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { - w, err := s.resolveWorker() +func (b *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) { + w, err := b.resolveWorker() if err != nil { return "", nil, err } @@ -281,8 +280,8 @@ func (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb. 
} else { id += platforms.Format(*platform) } - err = inBuilderContext(ctx, s.builder, opt.LogName, id, func(ctx context.Context, g session.Group) error { - dgst, config, err = w.ResolveImageConfig(ctx, ref, opt, s.sm, g) + err = inBuilderContext(ctx, b.builder, opt.LogName, id, func(ctx context.Context, g session.Group) error { + dgst, config, err = w.ResolveImageConfig(ctx, ref, opt, b.sm, g) return err }) return dgst, config, err diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go index a690012287dc48c7847b55e5a6afd8747fb52034..8ecf648412e11db7926f6fb92f6abd27bfc19756 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go @@ -37,13 +37,13 @@ func mapUserToChowner(user *copy.User, idmap *idtools.IdentityMapping) (copy.Cho // non-nil old is already mapped if idmap != nil { identity, err := idmap.ToHost(idtools.Identity{ - UID: old.Uid, - GID: old.Gid, + UID: old.UID, + GID: old.GID, }) if err != nil { return nil, err } - return ©.User{Uid: identity.UID, Gid: identity.GID}, nil + return ©.User{UID: identity.UID, GID: identity.GID}, nil } } return old, nil @@ -52,14 +52,14 @@ func mapUserToChowner(user *copy.User, idmap *idtools.IdentityMapping) (copy.Cho u := *user if idmap != nil { identity, err := idmap.ToHost(idtools.Identity{ - UID: user.Uid, - GID: user.Gid, + UID: user.UID, + GID: user.GID, }) if err != nil { return nil, err } - u.Uid = identity.UID - u.Gid = identity.GID + u.UID = identity.UID + u.GID = identity.GID } return func(*copy.User) (*copy.User, error) { return &u, nil diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go index faa4cdbfd855128f69bf9fdb2d811caa4e5aeeec..a803d7f494a301f0e1285b00f29a3a08e75581a5 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go @@ -4,6 +4,7 @@ import ( "context" "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" "github.com/pkg/errors" @@ -17,25 +18,25 @@ type RefManager struct { cm cache.Manager } -func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool) (fileoptypes.Mount, error) { +func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool, g session.Group) (fileoptypes.Mount, error) { ir, ok := ref.(cache.ImmutableRef) if !ok && ref != nil { return nil, errors.Errorf("invalid ref type: %T", ref) } if ir != nil && readonly { - m, err := ir.Mount(ctx, readonly) + m, err := ir.Mount(ctx, readonly, g) if err != nil { return nil, err } return &Mount{m: m}, nil } - mr, err := rm.cm.New(ctx, ir, cache.WithDescription("fileop target"), cache.CachePolicyRetain) + mr, err := rm.cm.New(ctx, ir, g, cache.WithDescription("fileop target"), cache.CachePolicyRetain) if err != nil { return nil, err } - m, err := mr.Mount(ctx, readonly) + m, err := mr.Mount(ctx, readonly, g) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go index 93b32362c9de9fae9c24afd419aee90dfef0dffb..8e4848cc506b1f243fe44746247c992573a4ab3b 100644 --- 
a/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go @@ -58,12 +58,12 @@ func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error) } if len(users) > 0 { - us.Uid = users[0].Uid - us.Gid = users[0].Gid + us.UID = users[0].Uid + us.GID = users[0].Gid } case *pb.UserOpt_ByID: - us.Uid = int(u.ByID) - us.Gid = int(u.ByID) + us.UID = int(u.ByID) + us.GID = int(u.ByID) } } @@ -108,10 +108,10 @@ func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error) } if len(groups) > 0 { - us.Gid = groups[0].Gid + us.GID = groups[0].Gid } case *pb.UserOpt_ByID: - us.Gid = int(u.ByID) + us.GID = int(u.ByID) } } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go index 780b559598d914b9fe59b1070893df562a8612cd..246f18706e8987c2e6c971ce0f9edb8f56dea725 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go @@ -10,5 +10,8 @@ import ( ) func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error) { + if chopt == nil { + return nil, nil + } return nil, errors.New("only implemented in linux") } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go b/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go new file mode 100644 index 0000000000000000000000000000000000000000..f9e3db8995baf6eb12080baab73a6129e9544b90 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go @@ -0,0 +1,516 @@ +package mounts + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/sys" + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/secrets" + "github.com/moby/buildkit/session/sshforward" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/grpcerrors" + "github.com/moby/locker" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + bolt "go.etcd.io/bbolt" + "google.golang.org/grpc/codes" +) + +func NewMountManager(name string, cm cache.Manager, sm *session.Manager, md *metadata.Store) *MountManager { + return &MountManager{ + cm: cm, + sm: sm, + cacheMounts: map[string]*cacheRefShare{}, + md: md, + managerName: name, + } +} + +type MountManager struct { + cm cache.Manager + sm *session.Manager + cacheMountsMu sync.Mutex + cacheMounts map[string]*cacheRefShare + md *metadata.Store + managerName string +} + +func (mm *MountManager) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt, s session.Group) (mref cache.MutableRef, err error) { + g := &cacheRefGetter{ + locker: &mm.cacheMountsMu, + cacheMounts: mm.cacheMounts, + cm: mm.cm, + md: mm.md, + globalCacheRefs: sharedCacheRefs, + name: fmt.Sprintf("cached mount %s from %s", m.Dest, mm.managerName), + session: s, + } + return g.getRefCacheDir(ctx, ref, id, sharing) +} + +type cacheRefGetter struct { + locker sync.Locker + cacheMounts map[string]*cacheRefShare + cm cache.Manager + md *metadata.Store + globalCacheRefs *cacheRefs + name 
string + session session.Group +} + +func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) { + key := "cache-dir:" + id + if ref != nil { + key += ":" + ref.ID() + } + mu := g.locker + mu.Lock() + defer mu.Unlock() + + if ref, ok := g.cacheMounts[key]; ok { + return ref.clone(), nil + } + defer func() { + if err == nil { + share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}} + g.cacheMounts[key] = share + mref = share.clone() + } + }() + + switch sharing { + case pb.CacheSharingOpt_SHARED: + return g.globalCacheRefs.get(key, func() (cache.MutableRef, error) { + return g.getRefCacheDirNoCache(ctx, key, ref, id, false) + }) + case pb.CacheSharingOpt_PRIVATE: + return g.getRefCacheDirNoCache(ctx, key, ref, id, false) + case pb.CacheSharingOpt_LOCKED: + return g.getRefCacheDirNoCache(ctx, key, ref, id, true) + default: + return nil, errors.Errorf("invalid cache sharing option: %s", sharing.String()) + } +} + +func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, block bool) (cache.MutableRef, error) { + makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) { + return g.cm.New(ctx, ref, g.session, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain) + } + + cacheRefsLocker.Lock(key) + defer cacheRefsLocker.Unlock(key) + for { + sis, err := g.md.Search(key) + if err != nil { + return nil, err + } + locked := false + for _, si := range sis { + if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil { + logrus.Debugf("reusing ref for cache dir: %s", mRef.ID()) + return mRef, nil + } else if errors.Is(err, cache.ErrLocked) { + locked = true + } + } + if block && locked { + cacheRefsLocker.Unlock(key) + select { + case <-ctx.Done(): + cacheRefsLocker.Lock(key) + return nil, ctx.Err() + case <-time.After(100 * time.Millisecond): + cacheRefsLocker.Lock(key) + } + } else { + break + } + } + mRef, err := makeMutable(ref) + if err != nil { + return nil, err + } + + si, _ := g.md.Get(mRef.ID()) + v, err := metadata.NewValue(key) + if err != nil { + mRef.Release(context.TODO()) + return nil, err + } + v.Index = key + if err := si.Update(func(b *bolt.Bucket) error { + return si.SetValue(b, key, v) + }); err != nil { + mRef.Release(context.TODO()) + return nil, err + } + return mRef, nil +} + +func (mm *MountManager) getSSHMountable(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) { + var caller session.Caller + err := mm.sm.Any(ctx, g, func(ctx context.Context, _ string, c session.Caller) error { + if err := sshforward.CheckSSHID(ctx, c, m.SSHOpt.ID); err != nil { + if m.SSHOpt.Optional { + return nil + } + if grpcerrors.Code(err) == codes.Unimplemented { + return errors.Errorf("no SSH key %q forwarded from the client", m.SSHOpt.ID) + } + return err + } + caller = c + return nil + }) + if err != nil { + return nil, err + } + // because ssh socket remains active, to actually handle session disconnecting ssh error + // should restart the whole exec with new session + return &sshMount{mount: m, caller: caller, idmap: mm.cm.IdentityMapping()}, nil +} + +type sshMount struct { + mount *pb.Mount + caller session.Caller + idmap *idtools.IdentityMapping +} + +func (sm *sshMount) Mount(ctx context.Context, readonly bool, g session.Group) (snapshot.Mountable, error) { + return &sshMountInstance{sm: sm, idmap: sm.idmap}, nil +} + 
+type sshMountInstance struct { + sm *sshMount + idmap *idtools.IdentityMapping +} + +func (sm *sshMountInstance) Mount() ([]mount.Mount, func() error, error) { + ctx, cancel := context.WithCancel(context.TODO()) + + uid := int(sm.sm.mount.SSHOpt.Uid) + gid := int(sm.sm.mount.SSHOpt.Gid) + + if sm.idmap != nil { + identity, err := sm.idmap.ToHost(idtools.Identity{ + UID: uid, + GID: gid, + }) + if err != nil { + cancel() + return nil, nil, err + } + uid = identity.UID + gid = identity.GID + } + + sock, cleanup, err := sshforward.MountSSHSocket(ctx, sm.sm.caller, sshforward.SocketOpt{ + ID: sm.sm.mount.SSHOpt.ID, + UID: uid, + GID: gid, + Mode: int(sm.sm.mount.SSHOpt.Mode & 0777), + }) + if err != nil { + cancel() + return nil, nil, err + } + release := func() error { + var err error + if cleanup != nil { + err = cleanup() + } + cancel() + return err + } + + return []mount.Mount{{ + Type: "bind", + Source: sock, + Options: []string{"rbind"}, + }}, release, nil +} + +func (sm *sshMountInstance) IdentityMapping() *idtools.IdentityMapping { + return sm.idmap +} + +func (mm *MountManager) getSecretMountable(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) { + if m.SecretOpt == nil { + return nil, errors.Errorf("invalid secret mount options") + } + sopt := *m.SecretOpt + + id := sopt.ID + if id == "" { + return nil, errors.Errorf("secret ID missing from mount options") + } + var dt []byte + var err error + err = mm.sm.Any(ctx, g, func(ctx context.Context, _ string, caller session.Caller) error { + dt, err = secrets.GetSecret(ctx, caller, id) + if err != nil { + if errors.Is(err, secrets.ErrNotFound) && m.SecretOpt.Optional { + return nil + } + return err + } + return nil + }) + if err != nil || dt == nil { + return nil, err + } + return &secretMount{mount: m, data: dt, idmap: mm.cm.IdentityMapping()}, nil +} + +type secretMount struct { + mount *pb.Mount + data []byte + idmap *idtools.IdentityMapping +} + +func (sm *secretMount) Mount(ctx context.Context, readonly bool, g session.Group) (snapshot.Mountable, error) { + return &secretMountInstance{sm: sm, idmap: sm.idmap}, nil +} + +type secretMountInstance struct { + sm *secretMount + root string + idmap *idtools.IdentityMapping +} + +func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) { + dir, err := ioutil.TempDir("", "buildkit-secrets") + if err != nil { + return nil, nil, errors.Wrap(err, "failed to create temp dir") + } + cleanupDir := func() error { + return os.RemoveAll(dir) + } + + if err := os.Chmod(dir, 0711); err != nil { + cleanupDir() + return nil, nil, err + } + + tmpMount := mount.Mount{ + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nodev", "nosuid", "noexec", fmt.Sprintf("uid=%d,gid=%d", os.Geteuid(), os.Getegid())}, + } + + if sys.RunningInUserNS() { + tmpMount.Options = nil + } + + if err := mount.All([]mount.Mount{tmpMount}, dir); err != nil { + cleanupDir() + return nil, nil, errors.Wrap(err, "unable to setup secret mount") + } + sm.root = dir + + cleanup := func() error { + if err := mount.Unmount(dir, 0); err != nil { + return err + } + return cleanupDir() + } + + randID := identity.NewID() + fp := filepath.Join(dir, randID) + if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil { + cleanup() + return nil, nil, err + } + + uid := int(sm.sm.mount.SecretOpt.Uid) + gid := int(sm.sm.mount.SecretOpt.Gid) + + if sm.idmap != nil { + identity, err := sm.idmap.ToHost(idtools.Identity{ + UID: uid, + GID: gid, + }) + if err != nil { + cleanup() + return nil, nil, err 
+ } + uid = identity.UID + gid = identity.GID + } + + if err := os.Chown(fp, uid, gid); err != nil { + cleanup() + return nil, nil, err + } + + if err := os.Chmod(fp, os.FileMode(sm.sm.mount.SecretOpt.Mode&0777)); err != nil { + cleanup() + return nil, nil, err + } + + return []mount.Mount{{ + Type: "bind", + Source: fp, + Options: []string{"ro", "rbind", "nodev", "nosuid", "noexec"}, + }}, cleanup, nil +} + +func (sm *secretMountInstance) IdentityMapping() *idtools.IdentityMapping { + return sm.idmap +} + +func (mm *MountManager) MountableCache(ctx context.Context, m *pb.Mount, ref cache.ImmutableRef, g session.Group) (cache.MutableRef, error) { + if m.CacheOpt == nil { + return nil, errors.Errorf("missing cache mount options") + } + return mm.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m, m.CacheOpt.Sharing, g) +} + +func (mm *MountManager) MountableTmpFS() cache.Mountable { + return newTmpfs(mm.cm.IdentityMapping()) +} + +func (mm *MountManager) MountableSecret(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) { + return mm.getSecretMountable(ctx, m, g) +} + +func (mm *MountManager) MountableSSH(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) { + return mm.getSSHMountable(ctx, m, g) +} + +func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable { + return &tmpfs{idmap: idmap} +} + +type tmpfs struct { + idmap *idtools.IdentityMapping +} + +func (f *tmpfs) Mount(ctx context.Context, readonly bool, g session.Group) (snapshot.Mountable, error) { + return &tmpfsMount{readonly: readonly, idmap: f.idmap}, nil +} + +type tmpfsMount struct { + readonly bool + idmap *idtools.IdentityMapping +} + +func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) { + opt := []string{"nosuid"} + if m.readonly { + opt = append(opt, "ro") + } + return []mount.Mount{{ + Type: "tmpfs", + Source: "tmpfs", + Options: opt, + }}, func() error { return nil }, nil +} + +func (m *tmpfsMount) IdentityMapping() *idtools.IdentityMapping { + return m.idmap +} + +var cacheRefsLocker = locker.New() +var sharedCacheRefs = &cacheRefs{} + +type cacheRefs struct { + mu sync.Mutex + shares map[string]*cacheRefShare +} + +// ClearActiveCacheMounts clears shared cache mounts currently in use. 
+// Caller needs to hold CacheMountsLocker before calling +func ClearActiveCacheMounts() { + sharedCacheRefs.shares = nil +} + +func CacheMountsLocker() sync.Locker { + return &sharedCacheRefs.mu +} + +func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) { + r.mu.Lock() + defer r.mu.Unlock() + + if r.shares == nil { + r.shares = map[string]*cacheRefShare{} + } + + share, ok := r.shares[key] + if ok { + return share.clone(), nil + } + + mref, err := fn() + if err != nil { + return nil, err + } + + share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}} + r.shares[key] = share + return share.clone(), nil +} + +type cacheRefShare struct { + cache.MutableRef + mu sync.Mutex + refs map[*cacheRef]struct{} + main *cacheRefs + key string +} + +func (r *cacheRefShare) clone() cache.MutableRef { + cacheRef := &cacheRef{cacheRefShare: r} + if cacheRefCloneHijack != nil { + cacheRefCloneHijack() + } + r.mu.Lock() + r.refs[cacheRef] = struct{}{} + r.mu.Unlock() + return cacheRef +} + +func (r *cacheRefShare) release(ctx context.Context) error { + if r.main != nil { + delete(r.main.shares, r.key) + } + return r.MutableRef.Release(ctx) +} + +var cacheRefReleaseHijack func() +var cacheRefCloneHijack func() + +type cacheRef struct { + *cacheRefShare +} + +func (r *cacheRef) Release(ctx context.Context) error { + if r.main != nil { + r.main.mu.Lock() + defer r.main.mu.Unlock() + } + r.mu.Lock() + defer r.mu.Unlock() + delete(r.refs, r) + if len(r.refs) == 0 { + if cacheRefReleaseHijack != nil { + cacheRefReleaseHijack() + } + return r.release(ctx) + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go index d8b748de2e13e70a704e70e52ebbd03e77bfcdc2..5e7fbc7a15a058eb30a1ffcfab43b9474b767bb3 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go @@ -54,6 +54,7 @@ func (b *buildOp) CacheMap(ctx context.Context, g session.Group, index int) (*so Deps: make([]struct { Selector digest.Digest ComputeDigestFunc solver.ResultBasedCacheFunc + PreprocessFunc solver.PreprocessFunc }, len(b.v.Inputs())), }, true, nil } @@ -80,7 +81,7 @@ func (b *buildOp) Exec(ctx context.Context, g session.Group, inputs []solver.Res return nil, errors.Errorf("invalid reference for build %T", inp.Sys()) } - mount, err := ref.ImmutableRef.Mount(ctx, true) + mount, err := ref.ImmutableRef.Mount(ctx, true, g) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go index 6258ff53436dd6dfa3e3d4a4de5a387bcffe054a..1ad189033015d934e0dc22799c12c9a90bf730c7 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go @@ -5,34 +5,22 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "net" "os" "path" - "path/filepath" "sort" "strings" - "sync" - "time" - "github.com/containerd/containerd/mount" "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/sys" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/locker" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" - "github.com/moby/buildkit/client" "github.com/moby/buildkit/executor" - "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" - 
"github.com/moby/buildkit/session/secrets" - "github.com/moby/buildkit/session/sshforward" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/mounts" "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/grpcerrors" "github.com/moby/buildkit/util/progress/logs" utilsystem "github.com/moby/buildkit/util/system" "github.com/moby/buildkit/worker" @@ -40,8 +28,6 @@ import ( specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" - "google.golang.org/grpc/codes" ) const execCacheType = "buildkit.exec.v0" @@ -49,31 +35,26 @@ const execCacheType = "buildkit.exec.v0" type execOp struct { op *pb.ExecOp cm cache.Manager - sm *session.Manager - md *metadata.Store + mm *mounts.MountManager exec executor.Executor w worker.Worker platform *pb.Platform numInputs int - - cacheMounts map[string]*cacheRefShare - cacheMountsMu sync.Mutex } func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) { if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { return nil, err } + name := fmt.Sprintf("exec %s", strings.Join(op.Exec.Meta.Args, " ")) return &execOp{ - op: op.Exec, - cm: cm, - sm: sm, - md: md, - exec: exec, - numInputs: len(v.Inputs()), - w: w, - platform: platform, - cacheMounts: map[string]*cacheRefShare{}, + op: op.Exec, + mm: mounts.NewMountManager(name, cm, sm, md), + cm: cm, + exec: exec, + numInputs: len(v.Inputs()), + w: w, + platform: platform, }, nil } @@ -137,6 +118,7 @@ func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol Deps: make([]struct { Selector digest.Digest ComputeDigestFunc solver.ResultBasedCacheFunc + PreprocessFunc solver.PreprocessFunc }, e.numInputs), } @@ -156,6 +138,7 @@ func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol if !dep.NoContentBasedHash { cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors))) } + cm.Deps[i].PreprocessFunc = llbsolver.UnlazyResultFunc } return cm, true, nil @@ -221,328 +204,6 @@ func (e *execOp) getMountDeps() ([]dep, error) { return deps, nil } -func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) { - g := &cacheRefGetter{ - locker: &e.cacheMountsMu, - cacheMounts: e.cacheMounts, - cm: e.cm, - md: e.md, - globalCacheRefs: sharedCacheRefs, - name: fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")), - } - return g.getRefCacheDir(ctx, ref, id, sharing) -} - -type cacheRefGetter struct { - locker sync.Locker - cacheMounts map[string]*cacheRefShare - cm cache.Manager - md *metadata.Store - globalCacheRefs *cacheRefs - name string -} - -func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) { - key := "cache-dir:" + id - if ref != nil { - key += ":" + ref.ID() - } - mu := g.locker - mu.Lock() - defer mu.Unlock() - - if ref, ok := g.cacheMounts[key]; ok { - return ref.clone(), nil - } - defer func() { - if err == nil { - share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}} - g.cacheMounts[key] = share - mref = share.clone() - } - }() - - switch 
sharing { - case pb.CacheSharingOpt_SHARED: - return g.globalCacheRefs.get(key, func() (cache.MutableRef, error) { - return g.getRefCacheDirNoCache(ctx, key, ref, id, false) - }) - case pb.CacheSharingOpt_PRIVATE: - return g.getRefCacheDirNoCache(ctx, key, ref, id, false) - case pb.CacheSharingOpt_LOCKED: - return g.getRefCacheDirNoCache(ctx, key, ref, id, true) - default: - return nil, errors.Errorf("invalid cache sharing option: %s", sharing.String()) - } -} - -func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, block bool) (cache.MutableRef, error) { - makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) { - return g.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain) - } - - cacheRefsLocker.Lock(key) - defer cacheRefsLocker.Unlock(key) - for { - sis, err := g.md.Search(key) - if err != nil { - return nil, err - } - locked := false - for _, si := range sis { - if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil { - logrus.Debugf("reusing ref for cache dir: %s", mRef.ID()) - return mRef, nil - } else if errors.Is(err, cache.ErrLocked) { - locked = true - } - } - if block && locked { - cacheRefsLocker.Unlock(key) - select { - case <-ctx.Done(): - cacheRefsLocker.Lock(key) - return nil, ctx.Err() - case <-time.After(100 * time.Millisecond): - cacheRefsLocker.Lock(key) - } - } else { - break - } - } - mRef, err := makeMutable(ref) - if err != nil { - return nil, err - } - - si, _ := g.md.Get(mRef.ID()) - v, err := metadata.NewValue(key) - if err != nil { - mRef.Release(context.TODO()) - return nil, err - } - v.Index = key - if err := si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, key, v) - }); err != nil { - mRef.Release(context.TODO()) - return nil, err - } - return mRef, nil -} - -func (e *execOp) getSSHMountable(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) { - var caller session.Caller - err := e.sm.Any(ctx, g, func(ctx context.Context, _ string, c session.Caller) error { - if err := sshforward.CheckSSHID(ctx, c, m.SSHOpt.ID); err != nil { - if m.SSHOpt.Optional { - return nil - } - if grpcerrors.Code(err) == codes.Unimplemented { - return errors.Errorf("no SSH key %q forwarded from the client", m.SSHOpt.ID) - } - return err - } - caller = c - return nil - }) - if err != nil { - return nil, err - } - // because ssh socket remains active, to actually handle session disconnecting ssh error - // should restart the whole exec with new session - return &sshMount{mount: m, caller: caller, idmap: e.cm.IdentityMapping()}, nil -} - -type sshMount struct { - mount *pb.Mount - caller session.Caller - idmap *idtools.IdentityMapping -} - -func (sm *sshMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { - return &sshMountInstance{sm: sm, idmap: sm.idmap}, nil -} - -type sshMountInstance struct { - sm *sshMount - idmap *idtools.IdentityMapping -} - -func (sm *sshMountInstance) Mount() ([]mount.Mount, func() error, error) { - ctx, cancel := context.WithCancel(context.TODO()) - - uid := int(sm.sm.mount.SSHOpt.Uid) - gid := int(sm.sm.mount.SSHOpt.Gid) - - if sm.idmap != nil { - identity, err := sm.idmap.ToHost(idtools.Identity{ - UID: uid, - GID: gid, - }) - if err != nil { - cancel() - return nil, nil, err - } - uid = identity.UID - gid = identity.GID - } - - sock, cleanup, err := sshforward.MountSSHSocket(ctx, sm.sm.caller, sshforward.SocketOpt{ - ID: 
sm.sm.mount.SSHOpt.ID, - UID: uid, - GID: gid, - Mode: int(sm.sm.mount.SSHOpt.Mode & 0777), - }) - if err != nil { - cancel() - return nil, nil, err - } - release := func() error { - var err error - if cleanup != nil { - err = cleanup() - } - cancel() - return err - } - - return []mount.Mount{{ - Type: "bind", - Source: sock, - Options: []string{"rbind"}, - }}, release, nil -} - -func (sm *sshMountInstance) IdentityMapping() *idtools.IdentityMapping { - return sm.idmap -} - -func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) { - if m.SecretOpt == nil { - return nil, errors.Errorf("invalid sercet mount options") - } - sopt := *m.SecretOpt - - id := sopt.ID - if id == "" { - return nil, errors.Errorf("secret ID missing from mount options") - } - var dt []byte - var err error - err = e.sm.Any(ctx, g, func(ctx context.Context, _ string, caller session.Caller) error { - dt, err = secrets.GetSecret(ctx, caller, id) - if err != nil { - if errors.Is(err, secrets.ErrNotFound) && m.SecretOpt.Optional { - return nil - } - return err - } - return nil - }) - if err != nil || dt == nil { - return nil, err - } - return &secretMount{mount: m, data: dt, idmap: e.cm.IdentityMapping()}, nil -} - -type secretMount struct { - mount *pb.Mount - data []byte - idmap *idtools.IdentityMapping -} - -func (sm *secretMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { - return &secretMountInstance{sm: sm, idmap: sm.idmap}, nil -} - -type secretMountInstance struct { - sm *secretMount - root string - idmap *idtools.IdentityMapping -} - -func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) { - dir, err := ioutil.TempDir("", "buildkit-secrets") - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create temp dir") - } - cleanupDir := func() error { - return os.RemoveAll(dir) - } - - if err := os.Chmod(dir, 0711); err != nil { - cleanupDir() - return nil, nil, err - } - - tmpMount := mount.Mount{ - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nodev", "nosuid", "noexec", fmt.Sprintf("uid=%d,gid=%d", os.Geteuid(), os.Getegid())}, - } - - if sys.RunningInUserNS() { - tmpMount.Options = nil - } - - if err := mount.All([]mount.Mount{tmpMount}, dir); err != nil { - cleanupDir() - return nil, nil, errors.Wrap(err, "unable to setup secret mount") - } - sm.root = dir - - cleanup := func() error { - if err := mount.Unmount(dir, 0); err != nil { - return err - } - return cleanupDir() - } - - randID := identity.NewID() - fp := filepath.Join(dir, randID) - if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil { - cleanup() - return nil, nil, err - } - - uid := int(sm.sm.mount.SecretOpt.Uid) - gid := int(sm.sm.mount.SecretOpt.Gid) - - if sm.idmap != nil { - identity, err := sm.idmap.ToHost(idtools.Identity{ - UID: uid, - GID: gid, - }) - if err != nil { - cleanup() - return nil, nil, err - } - uid = identity.UID - gid = identity.GID - } - - if err := os.Chown(fp, uid, gid); err != nil { - cleanup() - return nil, nil, err - } - - if err := os.Chmod(fp, os.FileMode(sm.sm.mount.SecretOpt.Mode&0777)); err != nil { - cleanup() - return nil, nil, err - } - - return []mount.Mount{{ - Type: "bind", - Source: fp, - Options: []string{"ro", "rbind", "nodev", "nosuid", "noexec"}, - }}, cleanup, nil -} - -func (sm *secretMountInstance) IdentityMapping() *idtools.IdentityMapping { - return sm.idmap -} - func addDefaultEnvvar(env []string, k, v string) []string { for _, e := range env { if strings.HasPrefix(e, 
k+"=") { @@ -592,14 +253,14 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) { desc := fmt.Sprintf("mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")) - return e.cm.New(ctx, ref, cache.WithDescription(desc)) + return e.cm.New(ctx, ref, g, cache.WithDescription(desc)) } switch m.MountType { case pb.MountType_BIND: // if mount creates an output if m.Output != pb.SkipOutput { - // it it is readonly and not root then output is the input + // if it is readonly and not root then output is the input if m.Readonly && ref != nil && m.Dest != pb.RootMount { outputs = append(outputs, ref.Clone()) } else { @@ -622,10 +283,7 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu } case pb.MountType_CACHE: - if m.CacheOpt == nil { - return nil, errors.Errorf("missing cache mount options") - } - mRef, err := e.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m, m.CacheOpt.Sharing) + mRef, err := e.mm.MountableCache(ctx, m, ref, g) if err != nil { return nil, err } @@ -638,27 +296,25 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu } case pb.MountType_TMPFS: - mountable = newTmpfs(e.cm.IdentityMapping()) - + mountable = e.mm.MountableTmpFS() case pb.MountType_SECRET: - secretMount, err := e.getSecretMountable(ctx, m, g) + var err error + mountable, err = e.mm.MountableSecret(ctx, m, g) if err != nil { return nil, err } - if secretMount == nil { + if mountable == nil { continue } - mountable = secretMount - case pb.MountType_SSH: - sshMount, err := e.getSSHMountable(ctx, m, g) + var err error + mountable, err = e.mm.MountableSSH(ctx, m, g) if err != nil { return nil, err } - if sshMount == nil { + if mountable == nil { continue } - mountable = sshMount default: return nil, errors.Errorf("mount type %s not implemented", m.MountType) @@ -684,7 +340,11 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu root = active } } else { - mounts = append(mounts, executor.Mount{Src: mountable, Dest: m.Dest, Readonly: m.Readonly, Selector: m.Selector}) + mws := mountWithSession(mountable, g) + mws.Dest = m.Dest + mws.Readonly = m.Readonly + mws.Selector = m.Selector + mounts = append(mounts, mws) } } @@ -717,6 +377,7 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu Env: e.op.Meta.Env, Cwd: e.op.Meta.Cwd, User: e.op.Meta.User, + Hostname: e.op.Meta.Hostname, ReadonlyRootFS: readonlyRootFS, ExtraHosts: extraHosts, NetMode: e.op.Network, @@ -726,13 +387,17 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu if e.op.Meta.ProxyEnv != nil { meta.Env = append(meta.Env, proxyEnvList(e.op.Meta.ProxyEnv)...) 
} - meta.Env = addDefaultEnvvar(meta.Env, "PATH", utilsystem.DefaultPathEnv) + var currentOS string + if e.platform != nil { + currentOS = e.platform.OS + } + meta.Env = addDefaultEnvvar(meta.Env, "PATH", utilsystem.DefaultPathEnv(currentOS)) stdout, stderr := logs.NewLogStreams(ctx, os.Getenv("BUILDKIT_DEBUG_EXEC_OUTPUT") == "1") defer stdout.Close() defer stderr.Close() - if err := e.exec.Run(ctx, "", root, mounts, executor.ProcessInfo{Meta: meta, Stdin: nil, Stdout: stdout, Stderr: stderr}, nil); err != nil { + if err := e.exec.Run(ctx, "", mountWithSession(root, g), mounts, executor.ProcessInfo{Meta: meta, Stdin: nil, Stdout: stdout, Stderr: stderr}, nil); err != nil { return nil, errors.Wrapf(err, "executor failed running %v", meta.Args) } @@ -769,130 +434,6 @@ func proxyEnvList(p *pb.ProxyEnv) []string { return out } -func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable { - return &tmpfs{idmap: idmap} -} - -type tmpfs struct { - idmap *idtools.IdentityMapping -} - -func (f *tmpfs) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { - return &tmpfsMount{readonly: readonly, idmap: f.idmap}, nil -} - -type tmpfsMount struct { - readonly bool - idmap *idtools.IdentityMapping -} - -func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) { - opt := []string{"nosuid"} - if m.readonly { - opt = append(opt, "ro") - } - return []mount.Mount{{ - Type: "tmpfs", - Source: "tmpfs", - Options: opt, - }}, func() error { return nil }, nil -} - -func (m *tmpfsMount) IdentityMapping() *idtools.IdentityMapping { - return m.idmap -} - -var cacheRefsLocker = locker.New() -var sharedCacheRefs = &cacheRefs{} - -type cacheRefs struct { - mu sync.Mutex - shares map[string]*cacheRefShare -} - -// ClearActiveCacheMounts clears shared cache mounts currently in use. 
-// Caller needs to hold CacheMountsLocker before calling -func ClearActiveCacheMounts() { - sharedCacheRefs.shares = nil -} - -func CacheMountsLocker() sync.Locker { - return &sharedCacheRefs.mu -} - -func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) { - r.mu.Lock() - defer r.mu.Unlock() - - if r.shares == nil { - r.shares = map[string]*cacheRefShare{} - } - - share, ok := r.shares[key] - if ok { - return share.clone(), nil - } - - mref, err := fn() - if err != nil { - return nil, err - } - - share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}} - r.shares[key] = share - return share.clone(), nil -} - -type cacheRefShare struct { - cache.MutableRef - mu sync.Mutex - refs map[*cacheRef]struct{} - main *cacheRefs - key string -} - -func (r *cacheRefShare) clone() cache.MutableRef { - cacheRef := &cacheRef{cacheRefShare: r} - if cacheRefCloneHijack != nil { - cacheRefCloneHijack() - } - r.mu.Lock() - r.refs[cacheRef] = struct{}{} - r.mu.Unlock() - return cacheRef -} - -func (r *cacheRefShare) release(ctx context.Context) error { - if r.main != nil { - delete(r.main.shares, r.key) - } - return r.MutableRef.Release(ctx) -} - -var cacheRefReleaseHijack func() -var cacheRefCloneHijack func() - -type cacheRef struct { - *cacheRefShare -} - -func (r *cacheRef) Release(ctx context.Context) error { - if r.main != nil { - r.main.mu.Lock() - defer r.main.mu.Unlock() - } - r.mu.Lock() - defer r.mu.Unlock() - delete(r.refs, r) - if len(r.refs) == 0 { - if cacheRefReleaseHijack != nil { - cacheRefReleaseHijack() - } - return r.release(ctx) - } - return nil -} - func parseExtraHosts(ips []*pb.HostIP) ([]executor.HostIP, error) { out := make([]executor.HostIP, len(ips)) for i, hip := range ips { @@ -907,3 +448,20 @@ func parseExtraHosts(ips []*pb.HostIP) ([]executor.HostIP, error) { } return out, nil } + +func mountWithSession(m cache.Mountable, g session.Group) executor.Mount { + _, readonly := m.(cache.ImmutableRef) + return executor.Mount{ + Src: &mountable{m: m, g: g}, + Readonly: readonly, + } +} + +type mountable struct { + m cache.Mountable + g session.Group +} + +func (m *mountable) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { + return m.m.Mount(ctx, readonly, m.g) +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go index 5eea8ff95388b9487a0371a540a0f038a227a3a0..0603cf57fcb0eb464e1ec72e81eea631c1136881 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go @@ -12,7 +12,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/binfmt_misc" + "github.com/moby/buildkit/util/archutil" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" copy "github.com/tonistiigi/fsutil/copy" @@ -83,7 +83,7 @@ func (m *staticEmulatorMount) IdentityMapping() *idtools.IdentityMapping { } func getEmulator(p *pb.Platform, idmap *idtools.IdentityMapping) (*emulator, error) { - all := binfmt_misc.SupportedPlatforms(false) + all := archutil.SupportedPlatforms(false) m := make(map[string]struct{}, len(all)) for _, p := range all { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go index 
db10cfb0f3d502707681cf93872b0e640b13d870..78b0c03144251e5a3e71d279e33b26a6ebaa0612 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go @@ -120,6 +120,7 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol Deps: make([]struct { Selector digest.Digest ComputeDigestFunc solver.ResultBasedCacheFunc + PreprocessFunc solver.PreprocessFunc }, f.numInputs), } @@ -138,6 +139,9 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol cm.Deps[idx].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupeSelectors(m)) } + for idx := range cm.Deps { + cm.Deps[idx].PreprocessFunc = llbsolver.UnlazyResultFunc + } return cm, true, nil } @@ -152,7 +156,7 @@ func (f *fileOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu inpRefs = append(inpRefs, workerRef.ImmutableRef) } - outs, err := f.solver.Solve(ctx, inpRefs, f.op.Actions) + outs, err := f.solver.Solve(ctx, inpRefs, f.op.Actions, g) if err != nil { return nil, err } @@ -279,7 +283,7 @@ type input struct { ref fileoptypes.Ref } -func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, actions []*pb.FileAction) ([]fileoptypes.Ref, error) { +func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, actions []*pb.FileAction, g session.Group) ([]fileoptypes.Ref, error) { for i, a := range actions { if int(a.Input) < -1 || int(a.Input) >= len(inputs)+len(actions) { return nil, errors.Errorf("invalid input index %d, %d provided", a.Input, len(inputs)+len(actions)) @@ -337,7 +341,7 @@ func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, acti if err := s.validate(idx, inputs, actions, nil); err != nil { return err } - inp, err := s.getInput(ctx, idx, inputs, actions) + inp, err := s.getInput(ctx, idx, inputs, actions, g) if err != nil { return err } @@ -378,7 +382,7 @@ func (s *FileOpSolver) validate(idx int, inputs []fileoptypes.Ref, actions []*pb return nil } -func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction) (input, error) { +func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction, g session.Group) (input, error) { inp, err := s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (_ interface{}, err error) { s.mu.Lock() inp := s.ins[idx] @@ -411,12 +415,12 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp loadInput := func(ctx context.Context) func() error { return func() error { - inp, err := s.getInput(ctx, int(action.Input), inputs, actions) + inp, err := s.getInput(ctx, int(action.Input), inputs, actions, g) if err != nil { return err } if inp.ref != nil { - m, err := s.r.Prepare(ctx, inp.ref, false) + m, err := s.r.Prepare(ctx, inp.ref, false, g) if err != nil { return err } @@ -431,12 +435,12 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp loadSecondaryInput := func(ctx context.Context) func() error { return func() error { - inp, err := s.getInput(ctx, int(action.SecondaryInput), inputs, actions) + inp, err := s.getInput(ctx, int(action.SecondaryInput), inputs, actions, g) if err != nil { return err } if inp.ref != nil { - m, err := s.r.Prepare(ctx, inp.ref, true) + m, err := s.r.Prepare(ctx, inp.ref, true, g) if err != nil { return err } @@ -459,12 +463,12 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs 
[]fileoptyp if u.ByName.Input < 0 { return nil, errors.Errorf("invalid user index: %d", u.ByName.Input) } - inp, err := s.getInput(ctx, int(u.ByName.Input), inputs, actions) + inp, err := s.getInput(ctx, int(u.ByName.Input), inputs, actions, g) if err != nil { return nil, err } if inp.ref != nil { - mm, err := s.r.Prepare(ctx, inp.ref, true) + mm, err := s.r.Prepare(ctx, inp.ref, true, g) if err != nil { return nil, err } @@ -515,7 +519,7 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp } if inpMount == nil { - m, err := s.r.Prepare(ctx, nil, false) + m, err := s.r.Prepare(ctx, nil, false, g) if err != nil { return nil, err } @@ -546,7 +550,7 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp } case *pb.FileAction_Copy: if inpMountSecondary == nil { - m, err := s.r.Prepare(ctx, nil, true) + m, err := s.r.Prepare(ctx, nil, true, g) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes/types.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes/types.go index 67aab0267754b5c03027641b3b623b0fd0031947..ca7f3e2c5b920c7ba498c306c42ca45d1af60fd4 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes/types.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes/types.go @@ -3,6 +3,7 @@ package fileoptypes import ( "context" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver/pb" ) @@ -23,6 +24,6 @@ type Backend interface { } type RefManager interface { - Prepare(ctx context.Context, ref Ref, readonly bool) (Mount, error) + Prepare(ctx context.Context, ref Ref, readonly bool, g session.Group) (Mount, error) Commit(ctx context.Context, mount Mount) (Ref, error) } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go index b1972ad3ddaba1241236e2966c9581fb0f758f7e..4f8b6694f079d956c4a1bf11553b93d0ca2672ed 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go @@ -24,9 +24,10 @@ type sourceOp struct { src source.SourceInstance sessM *session.Manager w worker.Worker + vtx solver.Vertex } -func NewSourceOp(_ solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, sessM *session.Manager, w worker.Worker) (solver.Op, error) { +func NewSourceOp(vtx solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, sessM *session.Manager, w worker.Worker) (solver.Op, error) { if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { return nil, err } @@ -36,6 +37,7 @@ func NewSourceOp(_ solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *s w: w, sessM: sessM, platform: platform, + vtx: vtx, }, nil } @@ -49,7 +51,7 @@ func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) if err != nil { return nil, err } - src, err := s.sm.Resolve(ctx, id, s.sessM) + src, err := s.sm.Resolve(ctx, id, s.sessM, s.vtx) if err != nil { return nil, err } @@ -62,7 +64,7 @@ func (s *sourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*s if err != nil { return nil, false, err } - k, done, err := src.CacheKey(ctx, g, index) + k, cacheOpts, done, err := src.CacheKey(ctx, g, index) if err != nil { return nil, false, err } @@ -76,6 +78,7 @@ func (s *sourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*s return &solver.CacheMap{ // TODO: add os/arch Digest: dgst, + 
Opts: cacheOpts, }, done, nil } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/result.go b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go index dc96e4d00f208c07353fe8a6f4b0f11ad67c0f23..4c72353c9854b875e8d4eac8945892d2c23744ee 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/result.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go @@ -6,7 +6,9 @@ import ( "path" "github.com/moby/buildkit/cache/contenthash" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -19,8 +21,19 @@ type Selector struct { FollowLinks bool } +func UnlazyResultFunc(ctx context.Context, res solver.Result, g session.Group) error { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return errors.Errorf("invalid reference: %T", res) + } + if ref.ImmutableRef == nil { + return nil + } + return ref.ImmutableRef.Extract(ctx, g) +} + func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { - return func(ctx context.Context, res solver.Result) (digest.Digest, error) { + return func(ctx context.Context, res solver.Result, s session.Group) (digest.Digest, error) { ref, ok := res.Sys().(*worker.WorkerRef) if !ok { return "", errors.Errorf("invalid reference: %T", res) @@ -35,25 +48,23 @@ func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { eg, ctx := errgroup.WithContext(ctx) for i, sel := range selectors { - // FIXME(tonistiigi): enabling this parallelization seems to create wrong results for some big inputs(like gobuild) - // func(i int) { - // eg.Go(func() error { - if !sel.Wildcard { - dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks) - if err != nil { - return "", err - } - dgsts[i] = []byte(dgst) - } else { - dgst, err := contenthash.ChecksumWildcard(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks) - if err != nil { - return "", err + i, sel := i, sel + eg.Go(func() error { + if !sel.Wildcard { + dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks, s) + if err != nil { + return err + } + dgsts[i] = []byte(dgst) + } else { + dgst, err := contenthash.ChecksumWildcard(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks, s) + if err != nil { + return err + } + dgsts[i] = []byte(dgst) } - dgsts[i] = []byte(dgst) - } - // return nil - // }) - // }(i) + return nil + }) } if err := eg.Wait(); err != nil { @@ -64,11 +75,13 @@ func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { } } -func workerRefConverter(ctx context.Context, res solver.Result) (*solver.Remote, error) { - ref, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid result: %T", res.Sys()) - } +func workerRefConverter(g session.Group) func(ctx context.Context, res solver.Result) (*solver.Remote, error) { + return func(ctx context.Context, res solver.Result) (*solver.Remote, error) { + ref, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid result: %T", res.Sys()) + } - return ref.Worker.GetRemote(ctx, ref.ImmutableRef, true) + return ref.GetRemote(ctx, true, compression.Default, g) + } } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go index 
3d4f2da07378fcfa06080579d57eda97bf489c42..d3ef585284ce9dabc5106eacb12afa730bd9c42f 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go @@ -16,6 +16,7 @@ import ( "github.com/moby/buildkit/frontend/gateway" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/entitlements" "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/worker" @@ -106,7 +107,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro var res *frontend.Result if s.gatewayForwarder != nil && req.Definition == nil && req.Frontend == "" { - fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController, req.FrontendInputs, sessionID) + fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController, req.FrontendInputs, sessionID, s.sm) defer fwd.Discard() if err := s.gatewayForwarder.RegisterBuild(ctx, id, fwd); err != nil { return nil, err @@ -168,7 +169,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro } inp.Ref = workerRef.ImmutableRef - dt, err := inlineCache(ctx, exp.CacheExporter, r) + dt, err := inlineCache(ctx, exp.CacheExporter, r, session.NewGroup(sessionID)) if err != nil { return nil, err } @@ -192,7 +193,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro } m[k] = workerRef.ImmutableRef - dt, err := inlineCache(ctx, exp.CacheExporter, r) + dt, err := inlineCache(ctx, exp.CacheExporter, r, session.NewGroup(sessionID)) if err != nil { return nil, err } @@ -212,6 +213,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro } } + g := session.NewGroup(j.SessionID) var cacheExporterResponse map[string]string if e := exp.CacheExporter; e != nil { if err := inBuilderContext(ctx, j, "exporting cache", "", func(ctx context.Context, _ session.Group) error { @@ -223,8 +225,9 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro } // all keys have same export chain so exporting others is not needed _, err = r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ - Convert: workerRefConverter, + Convert: workerRefConverter(g), Mode: exp.CacheExportMode, + Session: g, }) return err }); err != nil { @@ -258,7 +261,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro }, nil } -func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult) ([]byte, error) { +func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult, g session.Group) ([]byte, error) { if efl, ok := e.(interface { ExportForLayers([]digest.Digest) ([]byte, error) }); ok { @@ -267,7 +270,7 @@ func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedR return nil, errors.Errorf("invalid reference: %T", res.Sys()) } - remote, err := workerRef.Worker.GetRemote(ctx, workerRef.ImmutableRef, true) + remote, err := workerRef.GetRemote(ctx, true, compression.Default, g) if err != nil || remote == nil { return nil, nil } @@ -278,8 +281,9 @@ func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedR } if _, err := res.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ - Convert: workerRefConverter, + Convert: workerRefConverter(g), Mode: solver.CacheExportModeMin, + Session: g, }); err != nil { return nil, err } diff --git 
a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go index e0e58f0a7b62602ef4f9c4854c6e03175c58493e..6754d48908b35a63496f74d862535f5976bdd410 100644 --- a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go +++ b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "github.com/moby/buildkit/session" "github.com/pkg/errors" ) @@ -297,7 +298,7 @@ func (s *inMemoryResultStore) Load(ctx context.Context, res CacheResult) (Result return v.(Result), nil } -func (s *inMemoryResultStore) LoadRemote(ctx context.Context, res CacheResult) (*Remote, error) { +func (s *inMemoryResultStore) LoadRemote(_ context.Context, _ CacheResult, _ session.Group) (*Remote, error) { return nil, nil } diff --git a/vendor/github.com/moby/buildkit/solver/pb/caps.go b/vendor/github.com/moby/buildkit/solver/pb/caps.go index 43a18176be72a4a2c905f7840fd39e3456f5a547..86552b21fa4ee8baa17b23f956d3109e15118dff 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/caps.go +++ b/vendor/github.com/moby/buildkit/solver/pb/caps.go @@ -22,7 +22,7 @@ const ( CapSourceGit apicaps.CapID = "source.git" CapSourceGitKeepDir apicaps.CapID = "source.git.keepgitdir" CapSourceGitFullURL apicaps.CapID = "source.git.fullurl" - CapSourceGitHttpAuth apicaps.CapID = "source.git.httpauth" + CapSourceGitHTTPAuth apicaps.CapID = "source.git.httpauth" CapSourceHTTP apicaps.CapID = "source.http" CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum" @@ -133,7 +133,7 @@ func init() { }) Caps.Init(apicaps.Cap{ - ID: CapSourceGitHttpAuth, + ID: CapSourceGitHTTPAuth, Enabled: true, Status: apicaps.CapStatusExperimental, }) diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go index 90e549d83342d9a5de6fd4333ae4d4d5e5485ae8..93023b37f46ef55780bbc0a9707eef81f6f6e051 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go @@ -467,6 +467,7 @@ type Meta struct { User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` + Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` } func (m *Meta) Reset() { *m = Meta{} } @@ -540,6 +541,13 @@ func (m *Meta) GetExtraHosts() []*HostIP { return nil } +func (m *Meta) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + // Mount specifies how to mount an input Op as a filesystem. 
type Mount struct { Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` @@ -551,6 +559,7 @@ type Mount struct { CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"` SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"` SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"` + ResultID string `protobuf:"bytes,23,opt,name=resultID,proto3" json:"resultID,omitempty"` } func (m *Mount) Reset() { *m = Mount{} } @@ -631,6 +640,13 @@ func (m *Mount) GetSSHOpt() *SSHOpt { return nil } +func (m *Mount) GetResultID() string { + if m != nil { + return m.ResultID + } + return "" +} + // CacheOpt defines options specific to cache mounts type CacheOpt struct { // ID is an optional namespace for the mount @@ -2316,144 +2332,146 @@ func init() { func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) } var fileDescriptor_8de16154b2733812 = []byte{ - // 2189 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x1b, 0xc9, - 0xf1, 0x17, 0xdf, 0x64, 0x51, 0x92, 0xf9, 0xef, 0xf5, 0xee, 0x9f, 0xab, 0x38, 0x92, 0x76, 0xec, - 0x2c, 0x64, 0xd9, 0xa6, 0x00, 0x2d, 0xb0, 0x5e, 0x2c, 0x82, 0x20, 0xe2, 0xc3, 0x10, 0xd7, 0xb6, - 0x28, 0x34, 0xfd, 0xc8, 0xcd, 0x18, 0x0d, 0x9b, 0xd4, 0x40, 0xe4, 0xf4, 0xa0, 0xa7, 0x69, 0x8b, - 0x97, 0x1c, 0xfc, 0x09, 0x16, 0x08, 0x90, 0x5b, 0x02, 0xe4, 0x12, 0x20, 0xf7, 0x5c, 0x73, 0xdf, - 0xe3, 0x22, 0xc8, 0x61, 0x91, 0xc3, 0x26, 0xb0, 0x3f, 0x47, 0x80, 0xa0, 0xaa, 0x7b, 0x1e, 0x94, - 0x65, 0xd8, 0x46, 0x82, 0x9c, 0xd8, 0xfd, 0xab, 0x5f, 0x57, 0x57, 0x57, 0x55, 0xd7, 0x54, 0x13, - 0x6a, 0x32, 0x8c, 0x5a, 0xa1, 0x92, 0x5a, 0xb2, 0x7c, 0x78, 0xb2, 0x71, 0x67, 0xe2, 0xeb, 0xd3, - 0xf9, 0x49, 0xcb, 0x93, 0xb3, 0xbd, 0x89, 0x9c, 0xc8, 0x3d, 0x12, 0x9d, 0xcc, 0xc7, 0x34, 0xa3, - 0x09, 0x8d, 0xcc, 0x12, 0xe7, 0x0f, 0x79, 0xc8, 0x0f, 0x42, 0xf6, 0x19, 0x94, 0xfd, 0x20, 0x9c, - 0xeb, 0xa8, 0x99, 0xdb, 0x2e, 0xec, 0xd4, 0xf7, 0x6b, 0xad, 0xf0, 0xa4, 0xd5, 0x47, 0x84, 0x5b, - 0x01, 0xdb, 0x86, 0xa2, 0x38, 0x17, 0x5e, 0x33, 0xbf, 0x9d, 0xdb, 0xa9, 0xef, 0x03, 0x12, 0x7a, - 0xe7, 0xc2, 0x1b, 0x84, 0x87, 0x2b, 0x9c, 0x24, 0xec, 0x73, 0x28, 0x47, 0x72, 0xae, 0x3c, 0xd1, - 0x2c, 0x10, 0x67, 0x15, 0x39, 0x43, 0x42, 0x88, 0x65, 0xa5, 0xa8, 0x69, 0xec, 0x4f, 0x45, 0xb3, - 0x98, 0x6a, 0xba, 0xe7, 0x4f, 0x0d, 0x87, 0x24, 0xec, 0x3a, 0x94, 0x4e, 0xe6, 0xfe, 0x74, 0xd4, - 0x2c, 0x11, 0xa5, 0x8e, 0x94, 0x36, 0x02, 0xc4, 0x31, 0x32, 0xb6, 0x03, 0xd5, 0x70, 0xea, 0xea, - 0xb1, 0x54, 0xb3, 0x26, 0xa4, 0x1b, 0x1e, 0x5b, 0x8c, 0x27, 0x52, 0x76, 0x17, 0xea, 0x9e, 0x0c, - 0x22, 0xad, 0x5c, 0x3f, 0xd0, 0x51, 0xb3, 0x4e, 0xe4, 0x8f, 0x91, 0xfc, 0x54, 0xaa, 0x33, 0xa1, - 0x3a, 0xa9, 0x90, 0x67, 0x99, 0xed, 0x22, 0xe4, 0x65, 0xe8, 0xfc, 0x36, 0x07, 0xd5, 0x58, 0x2b, - 0x73, 0x60, 0xf5, 0x40, 0x79, 0xa7, 0xbe, 0x16, 0x9e, 0x9e, 0x2b, 0xd1, 0xcc, 0x6d, 0xe7, 0x76, - 0x6a, 0x7c, 0x09, 0x63, 0xeb, 0x90, 0x1f, 0x0c, 0xc9, 0x51, 0x35, 0x9e, 0x1f, 0x0c, 0x59, 0x13, - 0x2a, 0x4f, 0x5c, 0xe5, 0xbb, 0x81, 0x26, 0xcf, 0xd4, 0x78, 0x3c, 0x65, 0xd7, 0xa0, 0x36, 0x18, - 0x3e, 0x11, 0x2a, 0xf2, 0x65, 0x40, 0xfe, 0xa8, 0xf1, 0x14, 0x60, 0x9b, 0x00, 0x83, 0xe1, 0x3d, - 0xe1, 0xa2, 0xd2, 0xa8, 0x59, 0xda, 0x2e, 0xec, 0xd4, 0x78, 0x06, 0x71, 0x7e, 0x0d, 0x25, 0x8a, - 0x11, 0xfb, 0x06, 0xca, 0x23, 0x7f, 0x22, 0x22, 0x6d, 0xcc, 0x69, 0xef, 0x7f, 0xf7, 0xe3, 0xd6, - 0xca, 0xdf, 0x7f, 0xdc, 0xda, 
0xcd, 0x24, 0x83, 0x0c, 0x45, 0xe0, 0xc9, 0x40, 0xbb, 0x7e, 0x20, - 0x54, 0xb4, 0x37, 0x91, 0x77, 0xcc, 0x92, 0x56, 0x97, 0x7e, 0xb8, 0xd5, 0xc0, 0x6e, 0x42, 0xc9, - 0x0f, 0x46, 0xe2, 0x9c, 0xec, 0x2f, 0xb4, 0x3f, 0xb2, 0xaa, 0xea, 0x83, 0xb9, 0x0e, 0xe7, 0xba, - 0x8f, 0x22, 0x6e, 0x18, 0xce, 0xef, 0x73, 0x50, 0x36, 0x39, 0xc0, 0xae, 0x41, 0x71, 0x26, 0xb4, - 0x4b, 0xfb, 0xd7, 0xf7, 0xab, 0xe8, 0xdb, 0x87, 0x42, 0xbb, 0x9c, 0x50, 0x4c, 0xaf, 0x99, 0x9c, - 0xa3, 0xef, 0xf3, 0x69, 0x7a, 0x3d, 0x44, 0x84, 0x5b, 0x01, 0xfb, 0x19, 0x54, 0x02, 0xa1, 0x5f, - 0x48, 0x75, 0x46, 0x3e, 0x5a, 0x37, 0x41, 0x3f, 0x12, 0xfa, 0xa1, 0x1c, 0x09, 0x1e, 0xcb, 0xd8, - 0x6d, 0xa8, 0x46, 0xc2, 0x9b, 0x2b, 0x5f, 0x2f, 0xc8, 0x5f, 0xeb, 0xfb, 0x0d, 0xca, 0x32, 0x8b, - 0x11, 0x39, 0x61, 0x38, 0x7f, 0xca, 0x41, 0x11, 0xcd, 0x60, 0x0c, 0x8a, 0xae, 0x9a, 0x98, 0xec, - 0xae, 0x71, 0x1a, 0xb3, 0x06, 0x14, 0x44, 0xf0, 0x9c, 0x2c, 0xaa, 0x71, 0x1c, 0x22, 0xe2, 0xbd, - 0x18, 0xd9, 0x18, 0xe1, 0x10, 0xd7, 0xcd, 0x23, 0xa1, 0x6c, 0x68, 0x68, 0xcc, 0x6e, 0x42, 0x2d, - 0x54, 0xf2, 0x7c, 0xf1, 0x0c, 0x57, 0x97, 0x32, 0x89, 0x87, 0x60, 0x2f, 0x78, 0xce, 0xab, 0xa1, - 0x1d, 0xb1, 0x5d, 0x00, 0x71, 0xae, 0x95, 0x7b, 0x28, 0x23, 0x1d, 0x35, 0xcb, 0x74, 0x76, 0xca, - 0x77, 0x04, 0xfa, 0xc7, 0x3c, 0x23, 0x75, 0xfe, 0x9a, 0x87, 0x12, 0xb9, 0x84, 0xed, 0x60, 0x04, - 0xc2, 0xb9, 0x09, 0x66, 0xa1, 0xcd, 0x6c, 0x04, 0x80, 0x62, 0x9d, 0x04, 0x00, 0xe3, 0xbe, 0x81, - 0xde, 0x98, 0x0a, 0x4f, 0x4b, 0x65, 0xd3, 0x2d, 0x99, 0xa3, 0xe9, 0x23, 0xcc, 0x08, 0x73, 0x1a, - 0x1a, 0xb3, 0x5b, 0x50, 0x96, 0x14, 0x46, 0x3a, 0xd0, 0x5b, 0x82, 0x6b, 0x29, 0xa8, 0x5c, 0x09, - 0x77, 0x24, 0x83, 0xe9, 0x82, 0x8e, 0x59, 0xe5, 0xc9, 0x9c, 0xdd, 0x82, 0x1a, 0xc5, 0xed, 0xd1, - 0x22, 0x14, 0xcd, 0x32, 0xc5, 0x61, 0x2d, 0x89, 0x29, 0x82, 0x3c, 0x95, 0xe3, 0x45, 0xf5, 0x5c, - 0xef, 0x54, 0x0c, 0x42, 0xdd, 0xbc, 0x9a, 0xfa, 0xab, 0x63, 0x31, 0x9e, 0x48, 0x51, 0x6d, 0x24, - 0x3c, 0x25, 0x34, 0x52, 0x3f, 0x26, 0xea, 0x9a, 0x0d, 0xaf, 0x01, 0x79, 0x2a, 0x67, 0x0e, 0x94, - 0x87, 0xc3, 0x43, 0x64, 0x7e, 0x92, 0x16, 0x12, 0x83, 0x70, 0x2b, 0x71, 0xfa, 0x50, 0x8d, 0xb7, - 0xc1, 0x5b, 0xd9, 0xef, 0xda, 0xfb, 0x9a, 0xef, 0x77, 0xd9, 0x1d, 0xa8, 0x44, 0xa7, 0xae, 0xf2, - 0x83, 0x09, 0xf9, 0x6e, 0x7d, 0xff, 0xa3, 0xc4, 0xaa, 0xa1, 0xc1, 0x51, 0x53, 0xcc, 0x71, 0x24, - 0xd4, 0x12, 0x33, 0xde, 0xd0, 0xd5, 0x80, 0xc2, 0xdc, 0x1f, 0x91, 0x9e, 0x35, 0x8e, 0x43, 0x44, - 0x26, 0xbe, 0xc9, 0xa5, 0x35, 0x8e, 0x43, 0x0c, 0xc8, 0x4c, 0x8e, 0x4c, 0xd9, 0x5b, 0xe3, 0x34, - 0x46, 0x1f, 0xcb, 0x50, 0xfb, 0x32, 0x70, 0xa7, 0xb1, 0x8f, 0xe3, 0xb9, 0x33, 0x8d, 0xcf, 0xf7, - 0x3f, 0xd9, 0xed, 0x37, 0x39, 0xa8, 0xc6, 0xb5, 0x1a, 0x0b, 0x8f, 0x3f, 0x12, 0x81, 0xf6, 0xc7, - 0xbe, 0x50, 0x76, 0xe3, 0x0c, 0xc2, 0xee, 0x40, 0xc9, 0xd5, 0x5a, 0xc5, 0xd7, 0xf9, 0xff, 0xb3, - 0x85, 0xbe, 0x75, 0x80, 0x92, 0x5e, 0xa0, 0xd5, 0x82, 0x1b, 0xd6, 0xc6, 0x57, 0x00, 0x29, 0x88, - 0xb6, 0x9e, 0x89, 0x85, 0xd5, 0x8a, 0x43, 0x76, 0x15, 0x4a, 0xcf, 0xdd, 0xe9, 0x5c, 0xd8, 0x1c, - 0x36, 0x93, 0xaf, 0xf3, 0x5f, 0xe5, 0x9c, 0xbf, 0xe4, 0xa1, 0x62, 0x0b, 0x3f, 0xbb, 0x0d, 0x15, - 0x2a, 0xfc, 0xd6, 0xa2, 0xcb, 0x2f, 0x46, 0x4c, 0x61, 0x7b, 0xc9, 0x17, 0x2d, 0x63, 0xa3, 0x55, - 0x65, 0xbe, 0x6c, 0xd6, 0xc6, 0xf4, 0xfb, 0x56, 0x18, 0x89, 0xb1, 0xfd, 0x74, 0xad, 0x23, 0xbb, - 0x2b, 0xc6, 0x7e, 0xe0, 0xa3, 0x7f, 0x38, 0x8a, 0xd8, 0xed, 0xf8, 0xd4, 0x45, 0xd2, 0xf8, 0x49, - 0x56, 0xe3, 0x9b, 0x87, 0xee, 0x43, 0x3d, 0xb3, 0xcd, 0x25, 0xa7, 0xbe, 0x91, 0x3d, 0xb5, 0xdd, - 0x92, 0xd4, 0x99, 0xef, 0x6e, 0xea, 0x85, 0xff, 0xc0, 
0x7f, 0x5f, 0x02, 0xa4, 0x2a, 0xdf, 0xbf, - 0xb0, 0x38, 0x2f, 0x0b, 0x00, 0x83, 0x10, 0x4b, 0xe7, 0xc8, 0xa5, 0xfa, 0xbd, 0xea, 0x4f, 0x02, - 0xa9, 0xc4, 0x33, 0xba, 0xaa, 0xb4, 0xbe, 0xca, 0xeb, 0x06, 0xa3, 0x1b, 0xc3, 0x0e, 0xa0, 0x3e, - 0x12, 0x91, 0xa7, 0x7c, 0x4a, 0x28, 0xeb, 0xf4, 0x2d, 0x3c, 0x53, 0xaa, 0xa7, 0xd5, 0x4d, 0x19, - 0xc6, 0x57, 0xd9, 0x35, 0x6c, 0x1f, 0x56, 0xc5, 0x79, 0x28, 0x95, 0xb6, 0xbb, 0x98, 0xfe, 0xe0, - 0x8a, 0xe9, 0x34, 0x10, 0xa7, 0x9d, 0x78, 0x5d, 0xa4, 0x13, 0xe6, 0x42, 0xd1, 0x73, 0x43, 0xf3, - 0x71, 0xac, 0xef, 0x37, 0x2f, 0xec, 0xd7, 0x71, 0x43, 0xe3, 0xb4, 0xf6, 0x17, 0x78, 0xd6, 0x97, - 0xff, 0xd8, 0xba, 0x95, 0xf9, 0x22, 0xce, 0xe4, 0xc9, 0x62, 0x8f, 0xf2, 0xe5, 0xcc, 0xd7, 0x7b, - 0x73, 0xed, 0x4f, 0xf7, 0xdc, 0xd0, 0x47, 0x75, 0xb8, 0xb0, 0xdf, 0xe5, 0xa4, 0x7a, 0xe3, 0x17, - 0xd0, 0xb8, 0x68, 0xf7, 0x87, 0xc4, 0x60, 0xe3, 0x2e, 0xd4, 0x12, 0x3b, 0xde, 0xb5, 0xb0, 0x9a, - 0x0d, 0xde, 0x9f, 0x73, 0x50, 0x36, 0xb7, 0x8a, 0xdd, 0x85, 0xda, 0x54, 0x7a, 0x2e, 0x1a, 0x10, - 0xb7, 0x68, 0x9f, 0xa6, 0x97, 0xae, 0xf5, 0x20, 0x96, 0x19, 0xaf, 0xa6, 0x5c, 0x4c, 0x32, 0x3f, - 0x18, 0xcb, 0xf8, 0x16, 0xac, 0xa7, 0x8b, 0xfa, 0xc1, 0x58, 0x72, 0x23, 0xdc, 0xb8, 0x0f, 0xeb, - 0xcb, 0x2a, 0x2e, 0xb1, 0xf3, 0xfa, 0x72, 0xba, 0x52, 0x5d, 0x4e, 0x16, 0x65, 0xcd, 0xbe, 0x0b, - 0xb5, 0x04, 0x67, 0xbb, 0x6f, 0x1a, 0xbe, 0x9a, 0x5d, 0x99, 0xb1, 0xd5, 0x99, 0x02, 0xa4, 0xa6, - 0x61, 0xb1, 0xc2, 0x5e, 0x30, 0x70, 0x67, 0x71, 0x93, 0x95, 0xcc, 0xe9, 0xdb, 0xe6, 0x6a, 0x97, - 0x4c, 0x59, 0xe5, 0x34, 0x66, 0x2d, 0x80, 0x51, 0x72, 0x61, 0xdf, 0x72, 0x8d, 0x33, 0x0c, 0x67, - 0x00, 0xd5, 0xd8, 0x08, 0xb6, 0x0d, 0xf5, 0xc8, 0xee, 0x8c, 0x9d, 0x0f, 0x6e, 0x57, 0xe2, 0x59, - 0x08, 0x3b, 0x18, 0xe5, 0x06, 0x13, 0xb1, 0xd4, 0xc1, 0x70, 0x44, 0xb8, 0x15, 0x38, 0x4f, 0xa1, - 0x44, 0x00, 0x5e, 0xb3, 0x48, 0xbb, 0x4a, 0xdb, 0x66, 0xc8, 0x34, 0x07, 0x32, 0xa2, 0x6d, 0xdb, - 0x45, 0x4c, 0x44, 0x6e, 0x08, 0xec, 0x06, 0xb6, 0x20, 0x23, 0xeb, 0xd1, 0xcb, 0x78, 0x28, 0x76, - 0x7e, 0x0e, 0xd5, 0x18, 0xc6, 0x93, 0x3f, 0xf0, 0x03, 0x61, 0x4d, 0xa4, 0x31, 0x36, 0x91, 0x9d, - 0x53, 0x57, 0xb9, 0x9e, 0x16, 0xa6, 0x0d, 0x28, 0xf1, 0x14, 0x70, 0xae, 0x43, 0x3d, 0x73, 0x7b, - 0x30, 0xdd, 0x9e, 0x50, 0x18, 0xcd, 0x1d, 0x36, 0x13, 0xe7, 0x25, 0xb6, 0xb8, 0x71, 0xd7, 0xf2, - 0x53, 0x80, 0x53, 0xad, 0xc3, 0x67, 0xd4, 0xc6, 0x58, 0xdf, 0xd7, 0x10, 0x21, 0x06, 0xdb, 0x82, - 0x3a, 0x4e, 0x22, 0x2b, 0x37, 0xf9, 0x4e, 0x2b, 0x22, 0x43, 0xf8, 0x09, 0xd4, 0xc6, 0xc9, 0xf2, - 0x82, 0x0d, 0x5d, 0xbc, 0xfa, 0x53, 0xa8, 0x06, 0xd2, 0xca, 0x4c, 0x57, 0x55, 0x09, 0x24, 0x89, - 0x9c, 0x5b, 0xf0, 0x7f, 0x6f, 0xf4, 0xe3, 0xec, 0x13, 0x28, 0x8f, 0xfd, 0xa9, 0xa6, 0xa2, 0x8f, - 0x8d, 0x9a, 0x9d, 0x39, 0xff, 0xca, 0x01, 0xa4, 0x91, 0xc5, 0x7c, 0xc5, 0xea, 0x8d, 0x9c, 0x55, - 0x53, 0xad, 0xa7, 0x50, 0x9d, 0xd9, 0x3a, 0x60, 0x63, 0x76, 0x6d, 0x39, 0x1b, 0x5a, 0x71, 0x99, - 0x30, 0x15, 0x62, 0xdf, 0x56, 0x88, 0x0f, 0xe9, 0x99, 0x93, 0x1d, 0xa8, 0x19, 0xc9, 0xbe, 0x7d, - 0x20, 0xbd, 0x68, 0xdc, 0x4a, 0x36, 0xee, 0xc3, 0xda, 0xd2, 0x96, 0xef, 0xf9, 0x4d, 0x48, 0xeb, - 0x59, 0xf6, 0x96, 0xdd, 0x86, 0xb2, 0x69, 0x22, 0x31, 0x25, 0x70, 0x64, 0xd5, 0xd0, 0x98, 0x3a, - 0x86, 0xe3, 0xf8, 0x05, 0xd2, 0x3f, 0x76, 0xf6, 0xa1, 0x6c, 0x9e, 0x58, 0x6c, 0x07, 0x2a, 0xae, - 0x67, 0xae, 0x63, 0xa6, 0x24, 0xa0, 0xf0, 0x80, 0x60, 0x1e, 0x8b, 0x9d, 0xbf, 0xe5, 0x01, 0x52, - 0xfc, 0x03, 0xba, 0xd2, 0xaf, 0x61, 0x3d, 0x12, 0x9e, 0x0c, 0x46, 0xae, 0x5a, 0x90, 0xd4, 0x3e, - 0x25, 0x2e, 0x5b, 0x72, 0x81, 0x99, 0xe9, 0x50, 0x0b, 0xef, 0xee, 0x50, 0x77, 
0xa0, 0xe8, 0xc9, - 0x70, 0x61, 0x3f, 0x14, 0x6c, 0xf9, 0x20, 0x1d, 0x19, 0x2e, 0xf0, 0x41, 0x89, 0x0c, 0xd6, 0x82, - 0xf2, 0xec, 0x8c, 0x1e, 0x9d, 0xa6, 0x61, 0xbf, 0xba, 0xcc, 0x7d, 0x78, 0x86, 0x63, 0x7c, 0xa2, - 0x1a, 0x16, 0xbb, 0x05, 0xa5, 0xd9, 0xd9, 0xc8, 0x57, 0xd4, 0xdb, 0xd6, 0x4d, 0x67, 0x98, 0xa5, - 0x77, 0x7d, 0x85, 0x0f, 0x51, 0xe2, 0x30, 0x07, 0xf2, 0x6a, 0xd6, 0xac, 0x10, 0xb3, 0x71, 0xc1, - 0x9b, 0xb3, 0xc3, 0x15, 0x9e, 0x57, 0xb3, 0x76, 0x15, 0xca, 0xc6, 0xaf, 0xce, 0x1f, 0x0b, 0xb0, - 0xbe, 0x6c, 0x25, 0xe6, 0x41, 0xa4, 0xbc, 0x38, 0x0f, 0x22, 0xe5, 0x25, 0xcd, 0x7b, 0x3e, 0xd3, - 0xbc, 0x3b, 0x50, 0x92, 0x2f, 0x02, 0xa1, 0xb2, 0xaf, 0xeb, 0xce, 0xa9, 0x7c, 0x11, 0x60, 0x9b, - 0x6a, 0x44, 0x4b, 0x5d, 0x5f, 0xc9, 0x76, 0x7d, 0x37, 0x60, 0x6d, 0x2c, 0xa7, 0x53, 0xf9, 0x62, - 0xb8, 0x98, 0x4d, 0xfd, 0xe0, 0xcc, 0xb6, 0x7e, 0xcb, 0x20, 0xdb, 0x81, 0x2b, 0x23, 0x5f, 0xa1, - 0x39, 0x1d, 0x19, 0x68, 0x11, 0xd0, 0x7b, 0x05, 0x79, 0x17, 0x61, 0xf6, 0x0d, 0x6c, 0xbb, 0x5a, - 0x8b, 0x59, 0xa8, 0x1f, 0x07, 0xa1, 0xeb, 0x9d, 0x75, 0xa5, 0x47, 0x77, 0x76, 0x16, 0xba, 0xda, - 0x3f, 0xf1, 0xa7, 0xf8, 0x34, 0xab, 0xd0, 0xd2, 0x77, 0xf2, 0xd8, 0xe7, 0xb0, 0xee, 0x29, 0xe1, - 0x6a, 0xd1, 0x15, 0x91, 0x3e, 0x76, 0xf5, 0x69, 0xb3, 0x4a, 0x2b, 0x2f, 0xa0, 0x78, 0x06, 0x17, - 0xad, 0x7d, 0xea, 0x4f, 0x47, 0x9e, 0xab, 0x46, 0xcd, 0x9a, 0x39, 0xc3, 0x12, 0xc8, 0x5a, 0xc0, - 0x08, 0xe8, 0xcd, 0x42, 0xbd, 0x48, 0xa8, 0x40, 0xd4, 0x4b, 0x24, 0x58, 0x38, 0xb5, 0x3f, 0x13, - 0x91, 0x76, 0x67, 0x21, 0xfd, 0x2b, 0x50, 0xe0, 0x29, 0xe0, 0x7c, 0x9b, 0x83, 0xc6, 0xc5, 0x14, - 0x41, 0x07, 0x87, 0x68, 0xa6, 0xbd, 0x6c, 0x38, 0x4e, 0x9c, 0x9e, 0xcf, 0x38, 0x3d, 0xfe, 0x42, - 0x15, 0x32, 0x5f, 0xa8, 0x24, 0x80, 0xc5, 0xb7, 0x07, 0x70, 0xc9, 0xa4, 0xd2, 0x45, 0x93, 0x7e, - 0x97, 0x83, 0x2b, 0x17, 0xd2, 0xf0, 0xbd, 0x2d, 0xda, 0x86, 0xfa, 0xcc, 0x3d, 0x13, 0xc7, 0xae, - 0xa2, 0xe0, 0x16, 0x4c, 0x0b, 0x97, 0x81, 0xfe, 0x0b, 0xf6, 0x05, 0xb0, 0x9a, 0xcd, 0xfd, 0x4b, - 0x6d, 0x8b, 0x43, 0x79, 0x24, 0xf5, 0x3d, 0x39, 0xb7, 0x5f, 0xbf, 0x38, 0x94, 0x31, 0xf8, 0x66, - 0xc0, 0x0b, 0x97, 0x04, 0xdc, 0x39, 0x82, 0x6a, 0x6c, 0x20, 0xdb, 0xb2, 0x4f, 0xf5, 0x5c, 0xfa, - 0x97, 0xd1, 0xe3, 0x48, 0x28, 0xb4, 0xdd, 0xbc, 0xdb, 0x3f, 0x83, 0xd2, 0x44, 0xc9, 0x79, 0x68, - 0x6b, 0xeb, 0x12, 0xc3, 0x48, 0x9c, 0x21, 0x54, 0x2c, 0xc2, 0x76, 0xa1, 0x7c, 0xb2, 0x38, 0x8a, - 0x9b, 0x0f, 0x7b, 0xb1, 0x71, 0x3e, 0xb2, 0x0c, 0xac, 0x16, 0x86, 0xc1, 0xae, 0x42, 0xf1, 0x64, - 0xd1, 0xef, 0x9a, 0x07, 0x19, 0xd6, 0x1c, 0x9c, 0xb5, 0xcb, 0xc6, 0x20, 0xe7, 0x01, 0xac, 0x66, - 0xd7, 0xa1, 0x53, 0x32, 0x4d, 0x0d, 0x8d, 0xd3, 0xe2, 0x9a, 0x7f, 0x47, 0x71, 0xdd, 0xdd, 0x81, - 0x8a, 0xfd, 0x53, 0x84, 0xd5, 0xa0, 0xf4, 0xf8, 0x68, 0xd8, 0x7b, 0xd4, 0x58, 0x61, 0x55, 0x28, - 0x1e, 0x0e, 0x86, 0x8f, 0x1a, 0x39, 0x1c, 0x1d, 0x0d, 0x8e, 0x7a, 0x8d, 0xfc, 0xee, 0x4d, 0x58, - 0xcd, 0xfe, 0x2d, 0xc2, 0xea, 0x50, 0x19, 0x1e, 0x1c, 0x75, 0xdb, 0x83, 0x5f, 0x35, 0x56, 0xd8, - 0x2a, 0x54, 0xfb, 0x47, 0xc3, 0x5e, 0xe7, 0x31, 0xef, 0x35, 0x72, 0xbb, 0xbf, 0x84, 0x5a, 0xf2, - 0x72, 0x47, 0x0d, 0xed, 0xfe, 0x51, 0xb7, 0xb1, 0xc2, 0x00, 0xca, 0xc3, 0x5e, 0x87, 0xf7, 0x50, - 0x6f, 0x05, 0x0a, 0xc3, 0xe1, 0x61, 0x23, 0x8f, 0xbb, 0x76, 0x0e, 0x3a, 0x87, 0xbd, 0x46, 0x01, - 0x87, 0x8f, 0x1e, 0x1e, 0xdf, 0x1b, 0x36, 0x8a, 0xbb, 0x5f, 0xc2, 0x95, 0x0b, 0x2f, 0x67, 0x5a, - 0x7d, 0x78, 0xc0, 0x7b, 0xa8, 0xa9, 0x0e, 0x95, 0x63, 0xde, 0x7f, 0x72, 0xf0, 0xa8, 0xd7, 0xc8, - 0xa1, 0xe0, 0xc1, 0xa0, 0x73, 0xbf, 0xd7, 0x6d, 0xe4, 0xdb, 0xd7, 0xbe, 0x7b, 0xb5, 0x99, 0xfb, - 0xfe, 
0xd5, 0x66, 0xee, 0x87, 0x57, 0x9b, 0xb9, 0x7f, 0xbe, 0xda, 0xcc, 0x7d, 0xfb, 0x7a, 0x73, - 0xe5, 0xfb, 0xd7, 0x9b, 0x2b, 0x3f, 0xbc, 0xde, 0x5c, 0x39, 0x29, 0xd3, 0x9f, 0x94, 0x5f, 0xfc, - 0x3b, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x60, 0x46, 0x7d, 0xe4, 0x14, 0x00, 0x00, + // 2217 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x1b, 0xc7, + 0x15, 0x17, 0xbf, 0xc9, 0x47, 0x49, 0x66, 0x27, 0x4e, 0xc2, 0xa8, 0xae, 0xa4, 0x6c, 0xdc, 0x40, + 0x96, 0x6d, 0x0a, 0x50, 0x80, 0x38, 0x08, 0x8a, 0xa2, 0xe2, 0x87, 0x21, 0xc6, 0xb6, 0x28, 0x0c, + 0xfd, 0xd1, 0x9b, 0xb1, 0x5a, 0x0e, 0xa9, 0x85, 0xc8, 0x9d, 0xc5, 0xec, 0xd0, 0x16, 0x2f, 0x3d, + 0xf8, 0x2f, 0x08, 0x50, 0xa0, 0xb7, 0x16, 0xe8, 0xa5, 0x7f, 0x41, 0xaf, 0x3d, 0x16, 0xc8, 0x31, + 0x87, 0x1e, 0x82, 0x1e, 0xd2, 0xc2, 0xbe, 0xf7, 0x3f, 0x28, 0x50, 0xbc, 0x37, 0xb3, 0x1f, 0x94, + 0x65, 0xd8, 0x46, 0x8b, 0x9e, 0x76, 0xe6, 0xbd, 0xdf, 0xbc, 0x79, 0xf3, 0xbe, 0xe6, 0xcd, 0x42, + 0x4d, 0x86, 0x51, 0x2b, 0x54, 0x52, 0x4b, 0x96, 0x0f, 0x4f, 0x36, 0x6e, 0x4f, 0x7c, 0x7d, 0x3a, + 0x3f, 0x69, 0x79, 0x72, 0xb6, 0x37, 0x91, 0x13, 0xb9, 0x47, 0xac, 0x93, 0xf9, 0x98, 0x66, 0x34, + 0xa1, 0x91, 0x59, 0xe2, 0xfc, 0x31, 0x0f, 0xf9, 0x41, 0xc8, 0x3e, 0x85, 0xb2, 0x1f, 0x84, 0x73, + 0x1d, 0x35, 0x73, 0xdb, 0x85, 0x9d, 0xfa, 0x7e, 0xad, 0x15, 0x9e, 0xb4, 0xfa, 0x48, 0xe1, 0x96, + 0xc1, 0xb6, 0xa1, 0x28, 0xce, 0x85, 0xd7, 0xcc, 0x6f, 0xe7, 0x76, 0xea, 0xfb, 0x80, 0x80, 0xde, + 0xb9, 0xf0, 0x06, 0xe1, 0xe1, 0x0a, 0x27, 0x0e, 0xfb, 0x1c, 0xca, 0x91, 0x9c, 0x2b, 0x4f, 0x34, + 0x0b, 0x84, 0x59, 0x45, 0xcc, 0x90, 0x28, 0x84, 0xb2, 0x5c, 0x94, 0x34, 0xf6, 0xa7, 0xa2, 0x59, + 0x4c, 0x25, 0xdd, 0xf5, 0xa7, 0x06, 0x43, 0x1c, 0xf6, 0x19, 0x94, 0x4e, 0xe6, 0xfe, 0x74, 0xd4, + 0x2c, 0x11, 0xa4, 0x8e, 0x90, 0x36, 0x12, 0x08, 0x63, 0x78, 0x6c, 0x07, 0xaa, 0xe1, 0xd4, 0xd5, + 0x63, 0xa9, 0x66, 0x4d, 0x48, 0x37, 0x3c, 0xb6, 0x34, 0x9e, 0x70, 0xd9, 0x1d, 0xa8, 0x7b, 0x32, + 0x88, 0xb4, 0x72, 0xfd, 0x40, 0x47, 0xcd, 0x3a, 0x81, 0x3f, 0x44, 0xf0, 0x13, 0xa9, 0xce, 0x84, + 0xea, 0xa4, 0x4c, 0x9e, 0x45, 0xb6, 0x8b, 0x90, 0x97, 0xa1, 0xf3, 0xbb, 0x1c, 0x54, 0x63, 0xa9, + 0xcc, 0x81, 0xd5, 0x03, 0xe5, 0x9d, 0xfa, 0x5a, 0x78, 0x7a, 0xae, 0x44, 0x33, 0xb7, 0x9d, 0xdb, + 0xa9, 0xf1, 0x25, 0x1a, 0x5b, 0x87, 0xfc, 0x60, 0x48, 0x86, 0xaa, 0xf1, 0xfc, 0x60, 0xc8, 0x9a, + 0x50, 0x79, 0xec, 0x2a, 0xdf, 0x0d, 0x34, 0x59, 0xa6, 0xc6, 0xe3, 0x29, 0xbb, 0x06, 0xb5, 0xc1, + 0xf0, 0xb1, 0x50, 0x91, 0x2f, 0x03, 0xb2, 0x47, 0x8d, 0xa7, 0x04, 0xb6, 0x09, 0x30, 0x18, 0xde, + 0x15, 0x2e, 0x0a, 0x8d, 0x9a, 0xa5, 0xed, 0xc2, 0x4e, 0x8d, 0x67, 0x28, 0xce, 0x6f, 0xa0, 0x44, + 0x3e, 0x62, 0xdf, 0x40, 0x79, 0xe4, 0x4f, 0x44, 0xa4, 0x8d, 0x3a, 0xed, 0xfd, 0xef, 0x7e, 0xdc, + 0x5a, 0xf9, 0xfb, 0x8f, 0x5b, 0xbb, 0x99, 0x60, 0x90, 0xa1, 0x08, 0x3c, 0x19, 0x68, 0xd7, 0x0f, + 0x84, 0x8a, 0xf6, 0x26, 0xf2, 0xb6, 0x59, 0xd2, 0xea, 0xd2, 0x87, 0x5b, 0x09, 0xec, 0x06, 0x94, + 0xfc, 0x60, 0x24, 0xce, 0x49, 0xff, 0x42, 0xfb, 0x03, 0x2b, 0xaa, 0x3e, 0x98, 0xeb, 0x70, 0xae, + 0xfb, 0xc8, 0xe2, 0x06, 0xe1, 0xfc, 0x21, 0x07, 0x65, 0x13, 0x03, 0xec, 0x1a, 0x14, 0x67, 0x42, + 0xbb, 0xb4, 0x7f, 0x7d, 0xbf, 0x8a, 0xb6, 0x7d, 0x20, 0xb4, 0xcb, 0x89, 0x8a, 0xe1, 0x35, 0x93, + 0x73, 0xb4, 0x7d, 0x3e, 0x0d, 0xaf, 0x07, 0x48, 0xe1, 0x96, 0xc1, 0x7e, 0x0e, 0x95, 0x40, 0xe8, + 0xe7, 0x52, 0x9d, 0x91, 0x8d, 0xd6, 0x8d, 0xd3, 0x8f, 0x84, 0x7e, 0x20, 0x47, 0x82, 0xc7, 0x3c, + 0x76, 0x0b, 0xaa, 0x91, 0xf0, 0xe6, 0xca, 0xd7, 0x0b, 0xb2, 0xd7, 0xfa, 0x7e, 0x83, 0xa2, 0xcc, + 
0xd2, 0x08, 0x9c, 0x20, 0x9c, 0xbf, 0xe6, 0xa0, 0x88, 0x6a, 0x30, 0x06, 0x45, 0x57, 0x4d, 0x4c, + 0x74, 0xd7, 0x38, 0x8d, 0x59, 0x03, 0x0a, 0x22, 0x78, 0x46, 0x1a, 0xd5, 0x38, 0x0e, 0x91, 0xe2, + 0x3d, 0x1f, 0x59, 0x1f, 0xe1, 0x10, 0xd7, 0xcd, 0x23, 0xa1, 0xac, 0x6b, 0x68, 0xcc, 0x6e, 0x40, + 0x2d, 0x54, 0xf2, 0x7c, 0xf1, 0x14, 0x57, 0x97, 0x32, 0x81, 0x87, 0xc4, 0x5e, 0xf0, 0x8c, 0x57, + 0x43, 0x3b, 0x62, 0xbb, 0x00, 0xe2, 0x5c, 0x2b, 0xf7, 0x50, 0x46, 0x3a, 0x6a, 0x96, 0xe9, 0xec, + 0x14, 0xef, 0x48, 0xe8, 0x1f, 0xf3, 0x0c, 0x97, 0x6d, 0x40, 0xf5, 0x54, 0x46, 0x3a, 0x70, 0x67, + 0xa2, 0x59, 0xa1, 0xed, 0x92, 0xb9, 0xf3, 0xaf, 0x3c, 0x94, 0xc8, 0x5c, 0x6c, 0x07, 0xbd, 0x13, + 0xce, 0x8d, 0xa3, 0x0b, 0x6d, 0x66, 0xbd, 0x03, 0x14, 0x07, 0x89, 0x73, 0x30, 0x26, 0x36, 0xd0, + 0x52, 0x53, 0xe1, 0x69, 0xa9, 0x6c, 0x28, 0x26, 0x73, 0x3c, 0xd6, 0x08, 0xa3, 0xc5, 0x9c, 0x94, + 0xc6, 0xec, 0x26, 0x94, 0x25, 0xb9, 0x98, 0x0e, 0xfb, 0x06, 0xc7, 0x5b, 0x08, 0x0a, 0x57, 0xc2, + 0x1d, 0xc9, 0x60, 0xba, 0x20, 0x13, 0x54, 0x79, 0x32, 0x67, 0x37, 0xa1, 0x46, 0x3e, 0x7d, 0xb8, + 0x08, 0x45, 0xb3, 0x4c, 0x3e, 0x5a, 0x4b, 0xfc, 0x8d, 0x44, 0x9e, 0xf2, 0x31, 0x89, 0x3d, 0xd7, + 0x3b, 0x15, 0x83, 0x50, 0x37, 0xaf, 0xa6, 0xb6, 0xec, 0x58, 0x1a, 0x4f, 0xb8, 0x28, 0x36, 0x12, + 0x9e, 0x12, 0x1a, 0xa1, 0x1f, 0x12, 0x74, 0xcd, 0xba, 0xde, 0x10, 0x79, 0xca, 0x67, 0x0e, 0x94, + 0x87, 0xc3, 0x43, 0x44, 0x7e, 0x94, 0x16, 0x19, 0x43, 0xe1, 0x96, 0x63, 0xce, 0x10, 0xcd, 0xa7, + 0xba, 0xdf, 0x6d, 0x7e, 0x6c, 0x0c, 0x14, 0xcf, 0x9d, 0x3e, 0x54, 0x63, 0x15, 0x30, 0x9b, 0xfb, + 0x5d, 0x9b, 0xe7, 0xf9, 0x7e, 0x97, 0xdd, 0x86, 0x4a, 0x74, 0xea, 0x2a, 0x3f, 0x98, 0x90, 0x5d, + 0xd7, 0xf7, 0x3f, 0x48, 0x34, 0x1e, 0x1a, 0x3a, 0xee, 0x12, 0x63, 0x1c, 0x09, 0xb5, 0x44, 0xc5, + 0xd7, 0x64, 0x35, 0xa0, 0x30, 0xf7, 0x47, 0x24, 0x67, 0x8d, 0xe3, 0x10, 0x29, 0x13, 0xdf, 0xc4, + 0xe0, 0x1a, 0xc7, 0x21, 0x3a, 0x6b, 0x26, 0x47, 0xa6, 0x5c, 0xae, 0x71, 0x1a, 0xa3, 0xee, 0x32, + 0xd4, 0xbe, 0x0c, 0xdc, 0x69, 0x6c, 0xff, 0x78, 0xee, 0x4c, 0xe3, 0xb3, 0xff, 0x5f, 0x76, 0xfb, + 0x6d, 0x0e, 0xaa, 0x71, 0x8d, 0xc7, 0x82, 0xe5, 0x8f, 0x44, 0xa0, 0xfd, 0xb1, 0x2f, 0x94, 0xdd, + 0x38, 0x43, 0x61, 0xb7, 0xa1, 0xe4, 0x6a, 0xad, 0xe2, 0x32, 0xf0, 0x71, 0xf6, 0x82, 0x68, 0x1d, + 0x20, 0xa7, 0x17, 0x68, 0xb5, 0xe0, 0x06, 0xb5, 0xf1, 0x15, 0x40, 0x4a, 0x44, 0x5d, 0xcf, 0xc4, + 0xc2, 0x4a, 0xc5, 0x21, 0xbb, 0x0a, 0xa5, 0x67, 0xee, 0x74, 0x2e, 0x6c, 0x7c, 0x9b, 0xc9, 0xd7, + 0xf9, 0xaf, 0x72, 0xce, 0x5f, 0xf2, 0x50, 0xb1, 0x17, 0x06, 0xbb, 0x05, 0x15, 0xba, 0x30, 0xac, + 0x46, 0x97, 0x27, 0x4d, 0x0c, 0x61, 0x7b, 0xc9, 0x4d, 0x98, 0xd1, 0xd1, 0x8a, 0x32, 0x37, 0xa2, + 0xd5, 0x31, 0xbd, 0x17, 0x0b, 0x23, 0x31, 0xb6, 0x57, 0xde, 0x3a, 0xa2, 0xbb, 0x62, 0xec, 0x07, + 0x3e, 0xda, 0x87, 0x23, 0x8b, 0xdd, 0x8a, 0x4f, 0x5d, 0x24, 0x89, 0x1f, 0x65, 0x25, 0xbe, 0x7e, + 0xe8, 0x3e, 0xd4, 0x33, 0xdb, 0x5c, 0x72, 0xea, 0xeb, 0xd9, 0x53, 0xdb, 0x2d, 0x49, 0x9c, 0xb9, + 0xaf, 0x53, 0x2b, 0xfc, 0x17, 0xf6, 0xfb, 0x12, 0x20, 0x15, 0xf9, 0xee, 0x45, 0xc7, 0x79, 0x51, + 0x00, 0x18, 0x84, 0x58, 0x72, 0x47, 0x2e, 0xd5, 0xfd, 0x55, 0x7f, 0x12, 0x48, 0x25, 0x9e, 0x52, + 0x1a, 0xd3, 0xfa, 0x2a, 0xaf, 0x1b, 0x1a, 0x65, 0x0c, 0x3b, 0x80, 0xfa, 0x48, 0x44, 0x9e, 0xf2, + 0x29, 0xa0, 0xac, 0xd1, 0xb7, 0xf0, 0x4c, 0xa9, 0x9c, 0x56, 0x37, 0x45, 0x18, 0x5b, 0x65, 0xd7, + 0xb0, 0x7d, 0x58, 0x15, 0xe7, 0xa1, 0x54, 0xda, 0xee, 0x62, 0xfa, 0x8a, 0x2b, 0xa6, 0x43, 0x41, + 0x3a, 0xed, 0xc4, 0xeb, 0x22, 0x9d, 0x30, 0x17, 0x8a, 0x9e, 0x1b, 0x9a, 0x4b, 0xb5, 0xbe, 0xdf, + 0xbc, 0xb0, 0x5f, 0xc7, 
0x0d, 0x8d, 0xd1, 0xda, 0x5f, 0xe0, 0x59, 0x5f, 0xfc, 0x63, 0xeb, 0x66, + 0xe6, 0x26, 0x9d, 0xc9, 0x93, 0xc5, 0x1e, 0xc5, 0xcb, 0x99, 0xaf, 0xf7, 0xe6, 0xda, 0x9f, 0xee, + 0xb9, 0xa1, 0x8f, 0xe2, 0x70, 0x61, 0xbf, 0xcb, 0x49, 0xf4, 0xc6, 0x2f, 0xa1, 0x71, 0x51, 0xef, + 0xf7, 0xf1, 0xc1, 0xc6, 0x1d, 0xa8, 0x25, 0x7a, 0xbc, 0x6d, 0x61, 0x35, 0xeb, 0xbc, 0x3f, 0xe7, + 0xa0, 0x6c, 0xb2, 0x8a, 0xdd, 0x81, 0xda, 0x54, 0x7a, 0x2e, 0x2a, 0x10, 0xb7, 0x76, 0x9f, 0xa4, + 0x49, 0xd7, 0xba, 0x1f, 0xf3, 0x8c, 0x55, 0x53, 0x2c, 0x06, 0x99, 0x1f, 0x8c, 0x65, 0x9c, 0x05, + 0xeb, 0xe9, 0xa2, 0x7e, 0x30, 0x96, 0xdc, 0x30, 0x37, 0xee, 0xc1, 0xfa, 0xb2, 0x88, 0x4b, 0xf4, + 0xfc, 0x6c, 0x39, 0x5c, 0xa9, 0x66, 0x27, 0x8b, 0xb2, 0x6a, 0xdf, 0x81, 0x5a, 0x42, 0x67, 0xbb, + 0xaf, 0x2b, 0xbe, 0x9a, 0x5d, 0x99, 0xd1, 0xd5, 0x99, 0x02, 0xa4, 0xaa, 0x61, 0xb1, 0xc2, 0x1e, + 0x92, 0xee, 0x51, 0xa3, 0x46, 0x32, 0xa7, 0x7b, 0xcf, 0xd5, 0x2e, 0xa9, 0xb2, 0xca, 0x69, 0xcc, + 0x5a, 0x00, 0xa3, 0x24, 0x61, 0xdf, 0x90, 0xc6, 0x19, 0x84, 0x33, 0x80, 0x6a, 0xac, 0x04, 0xdb, + 0x86, 0x7a, 0x64, 0x77, 0xc6, 0x8e, 0x09, 0xb7, 0x2b, 0xf1, 0x2c, 0x09, 0x3b, 0x1f, 0xe5, 0x06, + 0x13, 0xb1, 0xd4, 0xf9, 0x70, 0xa4, 0x70, 0xcb, 0x70, 0x9e, 0x40, 0x89, 0x08, 0x98, 0x66, 0x91, + 0x76, 0x95, 0xb6, 0x4d, 0x94, 0x69, 0x2a, 0x64, 0x44, 0xdb, 0xb6, 0x8b, 0x18, 0x88, 0xdc, 0x00, + 0xd8, 0x75, 0x6c, 0x5d, 0x46, 0xd6, 0xa2, 0x97, 0xe1, 0x90, 0xed, 0xfc, 0x02, 0xaa, 0x31, 0x19, + 0x4f, 0x7e, 0xdf, 0x0f, 0x84, 0x55, 0x91, 0xc6, 0xd8, 0x7c, 0x76, 0x4e, 0x5d, 0xe5, 0x7a, 0x5a, + 0x98, 0x16, 0xa1, 0xc4, 0x53, 0x82, 0xf3, 0x19, 0xd4, 0x33, 0xd9, 0x83, 0xe1, 0xf6, 0x98, 0xdc, + 0x68, 0x72, 0xd8, 0x4c, 0x9c, 0x17, 0xd8, 0x1a, 0xc7, 0xdd, 0xce, 0xcf, 0x00, 0x4e, 0xb5, 0x0e, + 0x9f, 0x52, 0xfb, 0x63, 0x6d, 0x5f, 0x43, 0x0a, 0x21, 0xd8, 0x16, 0xd4, 0x71, 0x12, 0x59, 0xbe, + 0x89, 0x77, 0x5a, 0x11, 0x19, 0xc0, 0x4f, 0xa1, 0x36, 0x4e, 0x96, 0x17, 0xac, 0xeb, 0xe2, 0xd5, + 0x9f, 0x40, 0x35, 0x90, 0x96, 0x67, 0xba, 0xb1, 0x4a, 0x20, 0x89, 0xe5, 0xdc, 0x84, 0x9f, 0xbc, + 0xd6, 0xc7, 0xb3, 0x8f, 0xa0, 0x3c, 0xf6, 0xa7, 0x9a, 0x8a, 0x3e, 0x36, 0x78, 0x76, 0xe6, 0xfc, + 0x3b, 0x07, 0x90, 0x7a, 0x16, 0xe3, 0x15, 0xab, 0x37, 0x62, 0x56, 0x4d, 0xb5, 0x9e, 0x42, 0x75, + 0x66, 0xeb, 0x80, 0xf5, 0xd9, 0xb5, 0xe5, 0x68, 0x68, 0xc5, 0x65, 0xc2, 0x54, 0x88, 0x7d, 0x5b, + 0x21, 0xde, 0xa7, 0xd7, 0x4e, 0x76, 0xa0, 0x46, 0x25, 0xfb, 0x66, 0x82, 0x34, 0xd1, 0xb8, 0xe5, + 0x6c, 0xdc, 0x83, 0xb5, 0xa5, 0x2d, 0xdf, 0xf1, 0x4e, 0x48, 0xeb, 0x59, 0x36, 0xcb, 0x6e, 0x41, + 0xd9, 0x34, 0x9f, 0x18, 0x12, 0x38, 0xb2, 0x62, 0x68, 0x4c, 0x1d, 0xc3, 0x71, 0xfc, 0x72, 0xe9, + 0x1f, 0x3b, 0xfb, 0x50, 0x36, 0x4f, 0x33, 0xb6, 0x03, 0x15, 0xd7, 0x33, 0xe9, 0x98, 0x29, 0x09, + 0xc8, 0x3c, 0x20, 0x32, 0x8f, 0xd9, 0xce, 0xdf, 0xf2, 0x00, 0x29, 0xfd, 0x3d, 0x3a, 0xd6, 0xaf, + 0x61, 0x3d, 0x12, 0x9e, 0x0c, 0x46, 0xae, 0x5a, 0x10, 0xd7, 0x3e, 0x41, 0x2e, 0x5b, 0x72, 0x01, + 0x99, 0xe9, 0x5e, 0x0b, 0x6f, 0xef, 0x5e, 0x77, 0xa0, 0xe8, 0xc9, 0x70, 0x61, 0x2f, 0x0a, 0xb6, + 0x7c, 0x90, 0x8e, 0x0c, 0x17, 0xf8, 0x10, 0x45, 0x04, 0x6b, 0x41, 0x79, 0x76, 0x46, 0x8f, 0x55, + 0xd3, 0xe8, 0x5f, 0x5d, 0xc6, 0x3e, 0x38, 0xc3, 0x31, 0x3e, 0x6d, 0x0d, 0x8a, 0xdd, 0x84, 0xd2, + 0xec, 0x6c, 0xe4, 0x2b, 0xea, 0x7b, 0xeb, 0xa6, 0x33, 0xcc, 0xc2, 0xbb, 0xbe, 0xc2, 0x07, 0x2c, + 0x61, 0x98, 0x03, 0x79, 0x35, 0xa3, 0x5e, 0xbf, 0x6e, 0x5e, 0x31, 0x19, 0x6b, 0xce, 0x0e, 0x57, + 0x78, 0x5e, 0xcd, 0xda, 0x55, 0x28, 0x1b, 0xbb, 0x3a, 0x7f, 0x2a, 0xc0, 0xfa, 0xb2, 0x96, 0x18, + 0x07, 0x91, 0xf2, 0xe2, 0x38, 0x88, 0x94, 0x97, 
0x34, 0xf6, 0xf9, 0x4c, 0x63, 0xef, 0x40, 0x49, + 0x3e, 0x0f, 0x84, 0xca, 0xbe, 0xca, 0x3b, 0xa7, 0xf2, 0x79, 0x80, 0x6d, 0xaa, 0x61, 0x2d, 0x75, + 0x7d, 0x25, 0xdb, 0xf5, 0x5d, 0x87, 0xb5, 0xb1, 0x9c, 0x4e, 0xe5, 0xf3, 0xe1, 0x62, 0x36, 0xf5, + 0x83, 0x33, 0xdb, 0xfa, 0x2d, 0x13, 0xd9, 0x0e, 0x5c, 0x19, 0xf9, 0x0a, 0xd5, 0xe9, 0xc8, 0x40, + 0x8b, 0x80, 0xde, 0x39, 0x88, 0xbb, 0x48, 0x66, 0xdf, 0xc0, 0xb6, 0xab, 0xb5, 0x98, 0x85, 0xfa, + 0x51, 0x10, 0xba, 0xde, 0x59, 0x57, 0x7a, 0x94, 0xb3, 0xb3, 0xd0, 0xd5, 0xfe, 0x89, 0x3f, 0xc5, + 0x27, 0x5d, 0x85, 0x96, 0xbe, 0x15, 0xc7, 0x3e, 0x87, 0x75, 0x4f, 0x09, 0x57, 0x8b, 0xae, 0x88, + 0xf4, 0xb1, 0xab, 0x4f, 0x9b, 0x55, 0x5a, 0x79, 0x81, 0x8a, 0x67, 0x70, 0x51, 0xdb, 0x27, 0xfe, + 0x74, 0xe4, 0xb9, 0x6a, 0xd4, 0xac, 0x99, 0x33, 0x2c, 0x11, 0x59, 0x0b, 0x18, 0x11, 0x7a, 0xb3, + 0x50, 0x2f, 0x12, 0x28, 0x10, 0xf4, 0x12, 0x0e, 0x16, 0x4e, 0xed, 0xcf, 0x44, 0xa4, 0xdd, 0x59, + 0x48, 0x7f, 0x13, 0x0a, 0x3c, 0x25, 0x38, 0xdf, 0xe6, 0xa0, 0x71, 0x31, 0x44, 0xd0, 0xc0, 0x21, + 0xaa, 0x69, 0x93, 0x0d, 0xc7, 0x89, 0xd1, 0xf3, 0x19, 0xa3, 0xc7, 0x37, 0x54, 0x21, 0x73, 0x43, + 0x25, 0x0e, 0x2c, 0xbe, 0xd9, 0x81, 0x4b, 0x2a, 0x95, 0x2e, 0xaa, 0xf4, 0xfb, 0x1c, 0x5c, 0xb9, + 0x10, 0x86, 0xef, 0xac, 0xd1, 0x36, 0xd4, 0x67, 0xee, 0x99, 0x38, 0x76, 0x15, 0x39, 0xb7, 0x60, + 0x5a, 0xb8, 0x0c, 0xe9, 0x7f, 0xa0, 0x5f, 0x00, 0xab, 0xd9, 0xd8, 0xbf, 0x54, 0xb7, 0xd8, 0x95, + 0x47, 0x52, 0xdf, 0x95, 0x73, 0x7b, 0xfb, 0xc5, 0xae, 0x8c, 0x89, 0xaf, 0x3b, 0xbc, 0x70, 0x89, + 0xc3, 0x9d, 0x23, 0xa8, 0xc6, 0x0a, 0xb2, 0x2d, 0xfb, 0xc4, 0xcf, 0xa5, 0xbf, 0x9a, 0x1e, 0x45, + 0x42, 0xa1, 0xee, 0xe6, 0xbd, 0xff, 0x29, 0x94, 0x26, 0x4a, 0xce, 0x43, 0x5b, 0x5b, 0x97, 0x10, + 0x86, 0xe3, 0x0c, 0xa1, 0x62, 0x29, 0x6c, 0x17, 0xca, 0x27, 0x8b, 0xa3, 0xb8, 0xf9, 0xb0, 0x89, + 0x8d, 0xf3, 0x91, 0x45, 0x60, 0xb5, 0x30, 0x08, 0x76, 0x15, 0x8a, 0x27, 0x8b, 0x7e, 0xd7, 0x3c, + 0xc8, 0xb0, 0xe6, 0xe0, 0xac, 0x5d, 0x36, 0x0a, 0x39, 0xf7, 0x61, 0x35, 0xbb, 0x0e, 0x8d, 0x92, + 0x69, 0x6a, 0x68, 0x9c, 0x16, 0xd7, 0xfc, 0x5b, 0x8a, 0xeb, 0xee, 0x0e, 0x54, 0xec, 0xcf, 0x14, + 0x56, 0x83, 0xd2, 0xa3, 0xa3, 0x61, 0xef, 0x61, 0x63, 0x85, 0x55, 0xa1, 0x78, 0x38, 0x18, 0x3e, + 0x6c, 0xe4, 0x70, 0x74, 0x34, 0x38, 0xea, 0x35, 0xf2, 0xbb, 0x37, 0x60, 0x35, 0xfb, 0x3b, 0x85, + 0xd5, 0xa1, 0x32, 0x3c, 0x38, 0xea, 0xb6, 0x07, 0xbf, 0x6e, 0xac, 0xb0, 0x55, 0xa8, 0xf6, 0x8f, + 0x86, 0xbd, 0xce, 0x23, 0xde, 0x6b, 0xe4, 0x76, 0x7f, 0x05, 0xb5, 0xe4, 0x55, 0x8f, 0x12, 0xda, + 0xfd, 0xa3, 0x6e, 0x63, 0x85, 0x01, 0x94, 0x87, 0xbd, 0x0e, 0xef, 0xa1, 0xdc, 0x0a, 0x14, 0x86, + 0xc3, 0xc3, 0x46, 0x1e, 0x77, 0xed, 0x1c, 0x74, 0x0e, 0x7b, 0x8d, 0x02, 0x0e, 0x1f, 0x3e, 0x38, + 0xbe, 0x3b, 0x6c, 0x14, 0x77, 0xbf, 0x84, 0x2b, 0x17, 0x5e, 0xce, 0xb4, 0xfa, 0xf0, 0x80, 0xf7, + 0x50, 0x52, 0x1d, 0x2a, 0xc7, 0xbc, 0xff, 0xf8, 0xe0, 0x61, 0xaf, 0x91, 0x43, 0xc6, 0xfd, 0x41, + 0xe7, 0x5e, 0xaf, 0xdb, 0xc8, 0xb7, 0xaf, 0x7d, 0xf7, 0x72, 0x33, 0xf7, 0xfd, 0xcb, 0xcd, 0xdc, + 0x0f, 0x2f, 0x37, 0x73, 0xff, 0x7c, 0xb9, 0x99, 0xfb, 0xf6, 0xd5, 0xe6, 0xca, 0xf7, 0xaf, 0x36, + 0x57, 0x7e, 0x78, 0xb5, 0xb9, 0x72, 0x52, 0xa6, 0x9f, 0x9b, 0x5f, 0xfc, 0x27, 0x00, 0x00, 0xff, + 0xff, 0xa4, 0x50, 0x4f, 0x17, 0x1c, 0x15, 0x00, 0x00, } func (m *Op) Marshal() (dAtA []byte, err error) { @@ -2784,6 +2802,13 @@ func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintOps(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 
0x3a + } if len(m.ExtraHosts) > 0 { for iNdEx := len(m.ExtraHosts) - 1; iNdEx >= 0; iNdEx-- { { @@ -2865,6 +2890,15 @@ func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ResultID) > 0 { + i -= len(m.ResultID) + copy(dAtA[i:], m.ResultID) + i = encodeVarintOps(dAtA, i, uint64(len(m.ResultID))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } if m.SSHOpt != nil { { size, err := m.SSHOpt.MarshalToSizedBuffer(dAtA[:i]) @@ -4663,6 +4697,10 @@ func (m *Meta) Size() (n int) { n += 1 + l + sovOps(uint64(l)) } } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } return n } @@ -4704,6 +4742,10 @@ func (m *Mount) Size() (n int) { l = m.SSHOpt.Size() n += 2 + l + sovOps(uint64(l)) } + l = len(m.ResultID) + if l > 0 { + n += 2 + l + sovOps(uint64(l)) + } return n } @@ -6359,6 +6401,38 @@ func (m *Meta) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -6661,6 +6735,38 @@ func (m *Mount) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResultID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto index 087c3461630f23af95ada86cba925aa6db46f5d7..a975e91565c9f7acf126e5f8b1c52bc313c35fed 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.proto +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.proto @@ -57,6 +57,7 @@ message Meta { string user = 4; ProxyEnv proxy_env = 5; repeated HostIP extraHosts = 6; + string hostname = 7; } enum NetMode { @@ -81,6 +82,7 @@ message Mount { CacheOpt cacheOpt = 20; SecretOpt secretOpt = 21; SSHOpt SSHOpt = 22; + string resultID = 23; } // MountType defines a type of a mount from a supported set diff --git a/vendor/github.com/moby/buildkit/solver/result.go b/vendor/github.com/moby/buildkit/solver/result.go index c7e100b08cc7e32ab36cd0033980b37fef3fb152..75b378c4fdbe80d5539e730fbbc0d18b144c6f71 100644 --- a/vendor/github.com/moby/buildkit/solver/result.go +++ b/vendor/github.com/moby/buildkit/solver/result.go @@ -91,12 +91,12 @@ type clonedCachedResult struct { cr 
CachedResult } -func (r *clonedCachedResult) ID() string { - return r.Result.ID() +func (ccr *clonedCachedResult) ID() string { + return ccr.Result.ID() } -func (cr *clonedCachedResult) CacheKeys() []ExportableCacheKey { - return cr.cr.CacheKeys() +func (ccr *clonedCachedResult) CacheKeys() []ExportableCacheKey { + return ccr.cr.CacheKeys() } type SharedCachedResult struct { diff --git a/vendor/github.com/moby/buildkit/solver/types.go b/vendor/github.com/moby/buildkit/solver/types.go index f148ac13eed4a0337d20fa9b4bd5c7f7328946c5..76e4c91163b3cb8b4198e2cdc57f120fa59f5d46 100644 --- a/vendor/github.com/moby/buildkit/solver/types.go +++ b/vendor/github.com/moby/buildkit/solver/types.go @@ -95,6 +95,8 @@ type CacheExportOpt struct { Convert func(context.Context, Result) (*Remote, error) // Mode defines a cache export algorithm Mode CacheExportMode + // Session is the session group to client (for auth credentials etc) + Session session.Group } // CacheExporter can export the artifacts of the build chain @@ -145,7 +147,8 @@ type Op interface { Exec(ctx context.Context, g session.Group, inputs []Result) (outputs []Result, err error) } -type ResultBasedCacheFunc func(context.Context, Result) (digest.Digest, error) +type ResultBasedCacheFunc func(context.Context, Result, session.Group) (digest.Digest, error) +type PreprocessFunc func(context.Context, Result, session.Group) error // CacheMap is a description for calculating the cache key of an operation. type CacheMap struct { @@ -171,7 +174,16 @@ type CacheMap struct { // For example, in LLB this is invoked to calculate the cache key based on // the checksum of file contents from input snapshots. ComputeDigestFunc ResultBasedCacheFunc + + // PreprocessFunc is a function that runs on an input before it is passed to op + PreprocessFunc PreprocessFunc } + + // Opts specifies generic options that will be passed to cache load calls if/when + // the key associated with this CacheMap is used to load a ref. It allows options + // such as oci descriptor content providers and progress writers to be passed to + // the cache. Opts should not have any impact on the computed cache key. 
+ Opts CacheOpts } // ExportableCacheKey is a cache key connected with an exporter that can export diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource.go b/vendor/github.com/moby/buildkit/source/git/gitsource.go index 2c96e1b65d031fc62c0e67735b635178ebbce5ef..0197c3cdcef969904b21ad7b5716fb814b3ca1e0 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource.go @@ -13,7 +13,6 @@ import ( "regexp" "strings" - "github.com/docker/docker/pkg/locker" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/client" @@ -21,8 +20,10 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/secrets" "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/progress/logs" + "github.com/moby/locker" "github.com/pkg/errors" "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" @@ -63,7 +64,7 @@ func (gs *gitSource) ID() string { } // needs to be called with repo lock -func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []string) (target string, release func(), retErr error) { +func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []string, g session.Group) (target string, release func(), retErr error) { remoteKey := "git-remote::" + remote sis, err := gs.md.Search(remoteKey) @@ -87,7 +88,7 @@ func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []stri initializeRepo := false if remoteRef == nil { - remoteRef, err = gs.cache.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", remote))) + remoteRef, err = gs.cache.New(ctx, nil, g, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", remote))) if err != nil { return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", remote) } @@ -104,7 +105,7 @@ func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []stri } }() - mount, err := remoteRef.Mount(ctx, false) + mount, err := remoteRef.Mount(ctx, false, g) if err != nil { return "", nil, err } @@ -166,7 +167,7 @@ func (gs *gitSourceHandler) shaToCacheKey(sha string) string { return key } -func (gs *gitSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) { +func (gs *gitSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, _ solver.Vertex) (source.SourceInstance, error) { gitIdentifier, ok := id.(*source.GitIdentifier) if !ok { return nil, errors.Errorf("invalid git identifier %v", id) @@ -231,7 +232,7 @@ func (gs *gitSourceHandler) getAuthToken(ctx context.Context, g session.Group) e }) } -func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, bool, error) { +func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) { remote := gs.src.Remote ref := gs.src.Ref if ref == "" { @@ -243,14 +244,14 @@ func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index if isCommitSHA(ref) { ref = gs.shaToCacheKey(ref) gs.cacheKey = ref - return ref, true, nil + return ref, nil, true, nil } gs.getAuthToken(ctx, g) - gitDir, unmountGitDir, err := gs.mountRemote(ctx, remote, gs.auth) + gitDir, unmountGitDir, err := gs.mountRemote(ctx, remote, gs.auth, g) if err != nil { - return "", false, err + return "", nil, false, err 
} defer unmountGitDir() @@ -258,21 +259,21 @@ func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index buf, err := gitWithinDir(ctx, gitDir, "", gs.auth, "ls-remote", "origin", ref) if err != nil { - return "", false, errors.Wrapf(err, "failed to fetch remote %s", remote) + return "", nil, false, errors.Wrapf(err, "failed to fetch remote %s", remote) } out := buf.String() idx := strings.Index(out, "\t") if idx == -1 { - return "", false, errors.Errorf("repository does not contain ref %s, output: %q", ref, string(out)) + return "", nil, false, errors.Errorf("repository does not contain ref %s, output: %q", ref, string(out)) } sha := string(out[:idx]) if !isCommitSHA(sha) { - return "", false, errors.Errorf("invalid commit sha %q", sha) + return "", nil, false, errors.Errorf("invalid commit sha %q", sha) } sha = gs.shaToCacheKey(sha) gs.cacheKey = sha - return sha, true, nil + return sha, nil, true, nil } func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out cache.ImmutableRef, retErr error) { @@ -284,7 +285,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out cacheKey := gs.cacheKey if cacheKey == "" { var err error - cacheKey, _, err = gs.CacheKey(ctx, g, 0) + cacheKey, _, _, err = gs.CacheKey(ctx, g, 0) if err != nil { return nil, err } @@ -306,7 +307,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out gs.locker.Lock(gs.src.Remote) defer gs.locker.Unlock(gs.src.Remote) - gitDir, unmountGitDir, err := gs.mountRemote(ctx, gs.src.Remote, gs.auth) + gitDir, unmountGitDir, err := gs.mountRemote(ctx, gs.src.Remote, gs.auth, g) if err != nil { return nil, err } @@ -344,7 +345,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out } } - checkoutRef, err := gs.cache.New(ctx, nil, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref))) + checkoutRef, err := gs.cache.New(ctx, nil, g, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref))) if err != nil { return nil, errors.Wrapf(err, "failed to create new mutable for %s", gs.src.Remote) } @@ -355,7 +356,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out } }() - mount, err := checkoutRef.Mount(ctx, false) + mount, err := checkoutRef.Mount(ctx, false, g) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/source/http/httpsource.go b/vendor/github.com/moby/buildkit/source/http/httpsource.go index 951d99c524190ebef8b8c83d40b620c7b41e9b5a..dd9c12763bfe981a82949613f11afd6b909e419e 100644 --- a/vendor/github.com/moby/buildkit/source/http/httpsource.go +++ b/vendor/github.com/moby/buildkit/source/http/httpsource.go @@ -16,13 +16,14 @@ import ( "time" "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/locker" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/tracing" + "github.com/moby/locker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" bolt "go.etcd.io/bbolt" @@ -56,19 +57,19 @@ func NewSource(opt Opt) (source.Source, error) { } func (hs *httpSource) ID() string { - return source.HttpsScheme + return source.HTTPSScheme } type httpSourceHandler struct { 
*httpSource - src source.HttpIdentifier + src source.HTTPIdentifier refID string cacheKey digest.Digest sm *session.Manager } -func (hs *httpSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) { - httpIdentifier, ok := id.(*source.HttpIdentifier) +func (hs *httpSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, _ solver.Vertex) (source.SourceInstance, error) { + httpIdentifier, ok := id.(*source.HTTPIdentifier) if !ok { return nil, errors.Errorf("invalid http identifier %v", id) } @@ -122,26 +123,26 @@ func (hs *httpSourceHandler) formatCacheKey(filename string, dgst digest.Digest, return digest.FromBytes(dt) } -func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, bool, error) { +func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) { if hs.src.Checksum != "" { hs.cacheKey = hs.src.Checksum - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, nil), hs.src.Checksum, "").String(), true, nil + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, nil), hs.src.Checksum, "").String(), nil, true, nil } uh, err := hs.urlHash() if err != nil { - return "", false, nil + return "", nil, false, nil } // look up metadata(previously stored headers) for that URL sis, err := hs.md.Search(uh.String()) if err != nil { - return "", false, errors.Wrapf(err, "failed to search metadata for %s", uh) + return "", nil, false, errors.Wrapf(err, "failed to search metadata for %s", uh) } req, err := http.NewRequest("GET", hs.src.URL, nil) if err != nil { - return "", false, err + return "", nil, false, err } req = req.WithContext(ctx) m := map[string]*metadata.StorageItem{} @@ -198,7 +199,7 @@ func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, inde if dgst != "" { modTime := getModTime(si) resp.Body.Close() - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), true, nil + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), nil, true, nil } } } @@ -209,10 +210,10 @@ func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, inde resp, err := client.Do(req) if err != nil { - return "", false, err + return "", nil, false, err } if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return "", false, errors.Errorf("invalid response status %d", resp.StatusCode) + return "", nil, false, errors.Errorf("invalid response status %d", resp.StatusCode) } if resp.StatusCode == http.StatusNotModified { respETag := resp.Header.Get("ETag") @@ -225,31 +226,31 @@ func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, inde } si, ok := m[respETag] if !ok { - return "", false, errors.Errorf("invalid not-modified ETag: %v", respETag) + return "", nil, false, errors.Errorf("invalid not-modified ETag: %v", respETag) } hs.refID = si.ID() dgst := getChecksum(si) if dgst == "" { - return "", false, errors.Errorf("invalid metadata change") + return "", nil, false, errors.Errorf("invalid metadata change") } modTime := getModTime(si) resp.Body.Close() - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), true, nil + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), nil, true, nil } - ref, dgst, err := hs.save(ctx, resp) + ref, dgst, err := hs.save(ctx, resp, g) if err != nil 
{ - return "", false, err + return "", nil, false, err } ref.Release(context.TODO()) hs.cacheKey = dgst - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, resp.Header.Get("Last-Modified")).String(), true, nil + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, resp.Header.Get("Last-Modified")).String(), nil, true, nil } -func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response) (ref cache.ImmutableRef, dgst digest.Digest, retErr error) { - newRef, err := hs.cache.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("http url %s", hs.src.URL))) +func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response, s session.Group) (ref cache.ImmutableRef, dgst digest.Digest, retErr error) { + newRef, err := hs.cache.New(ctx, nil, s, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("http url %s", hs.src.URL))) if err != nil { return nil, "", err } @@ -264,7 +265,7 @@ func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response) (ref } }() - mount, err := newRef.Mount(ctx, false) + mount, err := newRef.Mount(ctx, false, s) if err != nil { return nil, "", err } @@ -391,7 +392,7 @@ func (hs *httpSourceHandler) Snapshot(ctx context.Context, g session.Group) (cac return nil, err } - ref, dgst, err := hs.save(ctx, resp) + ref, dgst, err := hs.save(ctx, resp, g) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/source/identifier.go b/vendor/github.com/moby/buildkit/source/identifier.go index fd500a157a28de2083d04149eb185e952876d8f2..c240731d82d1c359118a1aeeb77834143ffc4a04 100644 --- a/vendor/github.com/moby/buildkit/source/identifier.go +++ b/vendor/github.com/moby/buildkit/source/identifier.go @@ -30,8 +30,8 @@ const ( DockerImageScheme = "docker-image" GitScheme = "git" LocalScheme = "local" - HttpScheme = "http" - HttpsScheme = "https" + HTTPScheme = "http" + HTTPSScheme = "https" ) type Identifier interface { @@ -52,10 +52,10 @@ func FromString(s string) (Identifier, error) { return NewGitIdentifier(parts[1]) case LocalScheme: return NewLocalIdentifier(parts[1]) - case HttpsScheme: - return NewHttpIdentifier(parts[1], true) - case HttpScheme: - return NewHttpIdentifier(parts[1], false) + case HTTPSScheme: + return NewHTTPIdentifier(parts[1], true) + case HTTPScheme: + return NewHTTPIdentifier(parts[1], false) default: return nil, errors.Wrapf(errNotFound, "unknown schema %s", parts[0]) } @@ -142,7 +142,7 @@ func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) { } } } - if id, ok := id.(*HttpIdentifier); ok { + if id, ok := id.(*HTTPIdentifier); ok { for k, v := range op.Source.Attrs { switch k { case pb.AttrHTTPChecksum: @@ -196,7 +196,7 @@ func NewImageIdentifier(str string) (*ImageIdentifier, error) { return &ImageIdentifier{Reference: ref}, nil } -func (_ *ImageIdentifier) ID() string { +func (*ImageIdentifier) ID() string { return DockerImageScheme } @@ -217,15 +217,15 @@ func (*LocalIdentifier) ID() string { return LocalScheme } -func NewHttpIdentifier(str string, tls bool) (*HttpIdentifier, error) { +func NewHTTPIdentifier(str string, tls bool) (*HTTPIdentifier, error) { proto := "https://" if !tls { proto = "http://" } - return &HttpIdentifier{TLS: tls, URL: proto + str}, nil + return &HTTPIdentifier{TLS: tls, URL: proto + str}, nil } -type HttpIdentifier struct { +type HTTPIdentifier struct { TLS bool URL string Checksum digest.Digest @@ -235,8 +235,8 @@ type HttpIdentifier struct { GID int } -func (_ 
*HttpIdentifier) ID() string { - return HttpsScheme +func (*HTTPIdentifier) ID() string { + return HTTPSScheme } func (r ResolveMode) String() string { diff --git a/vendor/github.com/moby/buildkit/source/local/local.go b/vendor/github.com/moby/buildkit/source/local/local.go index 06241d03a1af2633cfd105d171ac8990c114d97a..3d385ab18fd650af0a3d13169a948777f4683e04 100644 --- a/vendor/github.com/moby/buildkit/source/local/local.go +++ b/vendor/github.com/moby/buildkit/source/local/local.go @@ -14,6 +14,7 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/filesync" "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/progress" digest "github.com/opencontainers/go-digest" @@ -51,7 +52,7 @@ func (ls *localSource) ID() string { return source.LocalScheme } -func (ls *localSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) { +func (ls *localSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, _ solver.Vertex) (source.SourceInstance, error) { localIdentifier, ok := id.(*source.LocalIdentifier) if !ok { return nil, errors.Errorf("invalid local identifier %v", id) @@ -70,13 +71,13 @@ type localSourceHandler struct { *localSource } -func (ls *localSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, bool, error) { +func (ls *localSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) { sessionID := ls.src.SessionID if sessionID == "" { id := g.SessionIterator().NextSession() if id == "" { - return "", false, errors.New("could not access local files without session") + return "", nil, false, errors.New("could not access local files without session") } sessionID = id } @@ -87,15 +88,15 @@ func (ls *localSourceHandler) CacheKey(ctx context.Context, g session.Group, ind FollowPaths []string }{SessionID: sessionID, IncludePatterns: ls.src.IncludePatterns, ExcludePatterns: ls.src.ExcludePatterns, FollowPaths: ls.src.FollowPaths}) if err != nil { - return "", false, err + return "", nil, false, err } - return "session:" + ls.src.Name + ":" + digest.FromBytes(dt).String(), true, nil + return "session:" + ls.src.Name + ":" + digest.FromBytes(dt).String(), nil, true, nil } func (ls *localSourceHandler) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) { var ref cache.ImmutableRef err := ls.sm.Any(ctx, g, func(ctx context.Context, _ string, c session.Caller) error { - r, err := ls.snapshot(ctx, c) + r, err := ls.snapshot(ctx, g, c) if err != nil { return err } @@ -108,7 +109,7 @@ func (ls *localSourceHandler) Snapshot(ctx context.Context, g session.Group) (ca return ref, nil } -func (ls *localSourceHandler) snapshot(ctx context.Context, caller session.Caller) (out cache.ImmutableRef, retErr error) { +func (ls *localSourceHandler) snapshot(ctx context.Context, s session.Group, caller session.Caller) (out cache.ImmutableRef, retErr error) { sharedKey := keySharedKey + ":" + ls.src.Name + ":" + ls.src.SharedKeyHint + ":" + caller.SharedKey() // TODO: replace caller.SharedKey() with source based hint from client(absolute-path+nodeid) var mutable cache.MutableRef @@ -125,7 +126,7 @@ func (ls *localSourceHandler) snapshot(ctx context.Context, caller session.Calle } if mutable == nil { - m, err := ls.cm.New(ctx, nil, cache.CachePolicyRetain, cache.WithRecordType(client.UsageRecordTypeLocalSource), 
cache.WithDescription(fmt.Sprintf("local source for %s", ls.src.Name))) + m, err := ls.cm.New(ctx, nil, s, cache.CachePolicyRetain, cache.WithRecordType(client.UsageRecordTypeLocalSource), cache.WithDescription(fmt.Sprintf("local source for %s", ls.src.Name))) if err != nil { return nil, err } @@ -145,7 +146,7 @@ func (ls *localSourceHandler) snapshot(ctx context.Context, caller session.Calle } }() - mount, err := mutable.Mount(ctx, false) + mount, err := mutable.Mount(ctx, false, s) if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/source/manager.go b/vendor/github.com/moby/buildkit/source/manager.go index 95393890c980e8cb9d6d3d4069d9a38568a861ba..aba45bffe13e942587702b6738de53dd030573c2 100644 --- a/vendor/github.com/moby/buildkit/source/manager.go +++ b/vendor/github.com/moby/buildkit/source/manager.go @@ -6,16 +6,17 @@ import ( "github.com/moby/buildkit/cache" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" "github.com/pkg/errors" ) type Source interface { ID() string - Resolve(ctx context.Context, id Identifier, sm *session.Manager) (SourceInstance, error) + Resolve(ctx context.Context, id Identifier, sm *session.Manager, vtx solver.Vertex) (SourceInstance, error) } type SourceInstance interface { - CacheKey(ctx context.Context, g session.Group, index int) (string, bool, error) + CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) } @@ -36,7 +37,7 @@ func (sm *Manager) Register(src Source) { sm.mu.Unlock() } -func (sm *Manager) Resolve(ctx context.Context, id Identifier, sessM *session.Manager) (SourceInstance, error) { +func (sm *Manager) Resolve(ctx context.Context, id Identifier, sessM *session.Manager, vtx solver.Vertex) (SourceInstance, error) { sm.mu.Lock() src, ok := sm.sources[id.ID()] sm.mu.Unlock() @@ -45,5 +46,5 @@ func (sm *Manager) Resolve(ctx context.Context, id Identifier, sessM *session.Ma return nil, errors.Errorf("no handler for %s", id.ID()) } - return src.Resolve(ctx, id, sessM) + return src.Resolve(ctx, id, sessM, vtx) } diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go b/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go index 281dfabd64a22256c01716977a7bc3208bfd3591..addfccfade0169c5f85be68d62798c3e8e03cece 100644 --- a/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go +++ b/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go @@ -1,3 +1,3 @@ -package moby_buildkit_v1_apicaps +package moby_buildkit_v1_apicaps //nolint:golint //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. caps.proto diff --git a/vendor/github.com/moby/buildkit/util/archutil/386_binary.go b/vendor/github.com/moby/buildkit/util/archutil/386_binary.go new file mode 100644 index 0000000000000000000000000000000000000000..dab00ecf5eaecf324ce28d417289e5df26896fbe --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/archutil/386_binary.go @@ -0,0 +1,8 @@ +// +build !386 + +package archutil + +// This file is generated by running make inside the archutil package. +// Do not edit manually. 
+ +const Binary386 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\xd8\x31\x8a\xc2\x40\x14\x06\xe0\x7f\x36\xb3\xd9\x84\xdd\x62\x0e\x10\x96\x2d\xb6\xd8\x6a\x58\x21\x57\x50\x1b\x11\x2c\x3c\x80\x62\xd0\x2a\x09\xc9\x08\x5a\x99\x23\x78\x18\x0b\x4b\x2f\x20\xd8\x5b\x7b\x0f\x79\xc9\x0b\xa4\x48\x63\xff\x3e\xf8\x19\x7c\xf3\x7c\xa9\xdf\x1c\x86\x93\x91\x52\x0a\xad\x37\x78\xa8\x7f\x1d\x75\x10\x03\xf8\x37\x4d\x3d\xc6\x0f\x34\xfe\xa0\xdb\x7b\x52\xe9\x80\x72\x03\x40\xd1\x54\x33\x68\xee\x4d\x33\x83\x12\x02\xa0\xbc\x73\x9d\xfa\x4e\x94\x4a\x07\x94\x08\x40\xc4\xff\xa7\xcc\x1e\x6e\x85\x1e\x3e\x8f\xa5\x9e\x2f\x9e\x37\x9e\xce\xe9\x7b\x17\xaa\x29\x08\x21\x84\x10\x42\x08\x21\x84\x10\xa2\xcf\x99\x96\xe6\xc1\xfd\x5a\xc1\x96\x9b\xd2\x15\x6e\xb1\x84\x4d\x33\x97\xd8\x75\xba\xb5\x79\x91\xe5\x49\xe1\xf6\xb0\x2e\xd9\xb9\x57\xe6\x7e\x02\xf8\xa8\xdf\x13\x78\xcf\xe7\x1d\xbf\xa5\xf9\xfc\xe6\xbd\xdd\xe7\x37\x07\x5a\xf0\xc3\x4e\x9f\xea\x9c\x5e\xa7\x1e\x1a\xe0\xb7\xa7\xef\x19\x00\x00\xff\xff\x32\x71\x64\x98\xd0\x10\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check.go b/vendor/github.com/moby/buildkit/util/archutil/386_check.go similarity index 78% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/386_check.go rename to vendor/github.com/moby/buildkit/util/archutil/386_check.go index 8137d35047b9751340459eb1dbefddf1f7deeb0c..5ef488f1e5522a1c0c8975b1aa68dc2bbed5c9c5 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/386_check.go @@ -1,6 +1,6 @@ // +build !386 -package binfmt_misc +package archutil func i386Supported() error { return check(Binary386) diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check_386.go b/vendor/github.com/moby/buildkit/util/archutil/386_check_386.go similarity index 74% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/386_check_386.go rename to vendor/github.com/moby/buildkit/util/archutil/386_check_386.go index 2b2ab45be453d2a94b4f77eaa8ca52a16e9d83c9..f2c0ee32423d6905f7ea877a9e7bb6a6e16bc0d3 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/386_check_386.go +++ b/vendor/github.com/moby/buildkit/util/archutil/386_check_386.go @@ -1,6 +1,6 @@ // +build 386 -package binfmt_misc +package archutil func i386Supported() error { return nil diff --git a/vendor/github.com/moby/buildkit/util/archutil/amd64_binary.go b/vendor/github.com/moby/buildkit/util/archutil/amd64_binary.go new file mode 100644 index 0000000000000000000000000000000000000000..59f135f001c68ffb5ffe3305d238c64ba19ca38f --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/archutil/amd64_binary.go @@ -0,0 +1,8 @@ +// +build !amd64 + +package archutil + +// This file is generated by running make inside the archutil package. +// Do not edit manually. 
+ +const Binaryamd64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x06\x3b\x06\x30\x4f\xc0\x01\xcc\x77\x80\x8a\x1b\x08\xc0\x95\x30\x38\x30\x58\x30\xb0\x30\x38\x30\xb0\x30\x30\x83\xd5\xb2\x30\x20\x03\x07\x14\xda\x01\x6a\x34\x8c\x66\x80\x9a\x03\xe2\xb2\x22\xf1\x61\xf6\xc1\x68\x1e\xa8\x30\x8c\x86\xa9\x63\x81\x62\x05\xa8\x79\x0a\x8c\x0e\xa8\x34\x54\x39\x8c\xe6\x80\xd2\x81\x4f\x4b\x52\xd8\x18\x88\x07\x30\x67\xb1\x40\xd9\x20\xb7\xba\xfb\x85\x82\xdc\x7d\x80\x05\xea\xfe\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\x14\x8c\x82\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\xa8\x00\x8f\xe3\x07\x6c\x40\x94\xe1\x7f\x7e\x56\x06\xbd\xe2\x8c\xe2\x92\xa2\x92\xc4\x24\x06\xbd\xbc\xfc\x92\x54\xbd\xf4\xbc\x52\xbd\x82\xa2\xfc\x82\xd4\xa2\x92\x4a\x06\xbd\x92\xd4\x8a\x12\x8a\xed\xe3\x66\x60\x60\x60\x07\x8f\x33\xa0\xf7\xdf\x51\xfb\xed\x0c\x68\xfd\x77\x18\x90\x83\xf6\xbd\xe1\x7d\x79\xf8\xb8\x01\xda\x78\x01\x03\x62\x9c\x01\x9d\xcf\x8c\xc5\x5d\x3c\x50\xfd\x2a\x04\xf4\x03\x02\x00\x00\xff\xff\x90\x57\x64\xbb\x30\x11\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check.go b/vendor/github.com/moby/buildkit/util/archutil/amd64_check.go similarity index 79% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check.go rename to vendor/github.com/moby/buildkit/util/archutil/amd64_check.go index 619133893bbba30b79a2c299b8c975fc92af1ecb..3942860553f523a07663c3810c56e273602cc351 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/amd64_check.go @@ -1,6 +1,6 @@ // +build !amd64 -package binfmt_misc +package archutil func amd64Supported() error { return check(Binaryamd64) diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check_amd64.go b/vendor/github.com/moby/buildkit/util/archutil/amd64_check_amd64.go similarity index 75% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check_amd64.go rename to vendor/github.com/moby/buildkit/util/archutil/amd64_check_amd64.go index e437dfef6e8b4f2b7b0b6f8bf74cc8d597fc1cd8..538840d1bde15b7b2ebb53244be05a01cd5bb3bc 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_check_amd64.go +++ b/vendor/github.com/moby/buildkit/util/archutil/amd64_check_amd64.go @@ -1,6 +1,6 @@ // +build amd64 -package binfmt_misc +package archutil func amd64Supported() error { return nil diff --git a/vendor/github.com/moby/buildkit/util/archutil/arm64_binary.go b/vendor/github.com/moby/buildkit/util/archutil/arm64_binary.go new file mode 100644 index 0000000000000000000000000000000000000000..f5b7feffca5a1bd8eb8424714e1b190a1bcd99f4 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/archutil/arm64_binary.go @@ -0,0 +1,8 @@ +// +build !arm64 + +package archutil + +// This file is generated by running make inside the archutil package. +// Do not edit manually. 
+ +const Binaryarm64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x86\xed\x0c\x20\xde\x06\x06\x07\x30\xdf\x01\x2a\x7e\x81\x01\x01\x1c\x18\x2c\x18\x98\x18\x1c\x18\x98\x19\x98\xc0\x6a\x59\x19\x18\x50\x64\x91\xe9\x3d\x50\xde\x1e\xb8\x3c\xc4\xae\xc0\xa7\x25\x29\x6c\x0c\xc4\x03\x01\x38\xab\xe1\xd2\x0a\xee\x86\x4b\x8c\x0c\x0c\x57\x18\xf4\x8a\x33\x8a\x4b\x8a\x4a\x12\x93\x18\xf4\x4a\x52\x2b\x4a\x18\xa8\x00\xb8\xa1\x2e\x84\xb9\x0d\x16\x0e\x1b\xa0\x7c\x1e\x34\xf5\x2c\x68\x7c\x90\x5e\x66\x2c\xe6\xc2\xfc\x2f\x88\x45\x3d\x32\x00\x04\x00\x00\xff\xff\xbb\x46\x88\x1e\x90\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check.go b/vendor/github.com/moby/buildkit/util/archutil/arm64_check.go similarity index 79% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check.go rename to vendor/github.com/moby/buildkit/util/archutil/arm64_check.go index 334d3142177ca31a58d3142994c7ea9fbb22d761..76600d070b872df7454c4eac3396db91e3b7b16e 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/arm64_check.go @@ -1,6 +1,6 @@ // +build !arm64 -package binfmt_misc +package archutil func arm64Supported() error { return check(Binaryarm64) diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check_arm64.go b/vendor/github.com/moby/buildkit/util/archutil/arm64_check_arm64.go similarity index 75% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check_arm64.go rename to vendor/github.com/moby/buildkit/util/archutil/arm64_check_arm64.go index a5fc6dc6659b687d4f71c0f76cd1fe1adfee5c79..1b1c8c6ae01674b047286aeb5ce3da588a9c583b 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_check_arm64.go +++ b/vendor/github.com/moby/buildkit/util/archutil/arm64_check_arm64.go @@ -1,6 +1,6 @@ // +build arm64 -package binfmt_misc +package archutil func arm64Supported() error { return nil diff --git a/vendor/github.com/moby/buildkit/util/archutil/arm_binary.go b/vendor/github.com/moby/buildkit/util/archutil/arm_binary.go new file mode 100644 index 0000000000000000000000000000000000000000..15e6f25ac8ae36f53401956f0dc4d4ea28a75790 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/archutil/arm_binary.go @@ -0,0 +1,8 @@ +// +build !arm + +package archutil + +// This file is generated by running make inside the archutil package. +// Do not edit manually. 
+ +const Binaryarm = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\x8c\x8f\x3d\x6e\xc2\x40\x10\x85\xbf\x89\x7f\x12\x29\x29\x92\x9c\x20\xe9\xa8\xb6\xf2\x05\x5c\x40\x05\x05\xdc\x60\x2d\x2c\xe1\xce\xb2\x07\x89\x0e\xce\xc0\x09\x7c\x08\x6e\x64\x51\x73\x05\xe4\x65\x2d\xb6\x70\xc1\x93\x46\xb3\xfb\xcd\x2b\xde\x3b\xce\x97\x0b\x11\x61\xd4\x1b\x33\x86\x9f\x22\x64\xc0\xe5\x01\x93\x8c\x3f\x77\x8b\x89\x78\xba\xc5\xcd\x09\xdc\x24\x9e\xad\xaf\xba\x65\x42\x29\xf0\xed\x5e\x5d\x2f\x75\xd7\x03\xb7\xfc\x07\xb0\xa5\x2d\x2a\xe4\x1d\xf8\x10\x4c\xbb\x6b\xb5\x51\x5b\x60\xb4\x3c\x28\x26\xdf\xac\x8c\x55\x6d\xaa\x62\xaf\x65\xcb\xcb\xfa\xf4\x09\x53\xdf\x47\x81\xaf\xe0\x1e\xfb\x3d\x44\x88\xa0\x1e\xf9\xd0\xe5\x37\xf0\x49\xb0\xa3\x80\x9f\x81\xff\x09\xdf\x3d\x00\x00\xff\xff\x0b\x8f\xbf\xbd\x54\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check.go b/vendor/github.com/moby/buildkit/util/archutil/arm_check.go similarity index 78% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check.go rename to vendor/github.com/moby/buildkit/util/archutil/arm_check.go index 3a10daafe6ef300704ec0ca1fd03b756ba066590..745e70505349d21401235ae3252d793be641d7d8 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/arm_check.go @@ -1,6 +1,6 @@ // +build !arm -package binfmt_misc +package archutil func armSupported() error { return check(Binaryarm) diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check_arm.go b/vendor/github.com/moby/buildkit/util/archutil/arm_check_arm.go similarity index 74% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check_arm.go rename to vendor/github.com/moby/buildkit/util/archutil/arm_check_arm.go index 11cfe616cdf9b7511ea64f9e58409c81ab6e508f..d6dea6fa734a65637cce8d0946d5914480ae9cae 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_check_arm.go +++ b/vendor/github.com/moby/buildkit/util/archutil/arm_check_arm.go @@ -1,6 +1,6 @@ // +build arm -package binfmt_misc +package archutil func armSupported() error { return nil diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/check_unix.go b/vendor/github.com/moby/buildkit/util/archutil/check_unix.go similarity index 97% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/check_unix.go rename to vendor/github.com/moby/buildkit/util/archutil/check_unix.go index 670e6d2c21ec2b127ba8bddc28ff6c0ee90076e6..236c96a5e929635ac9ca8a4bd7dbf002ee265a09 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/check_unix.go +++ b/vendor/github.com/moby/buildkit/util/archutil/check_unix.go @@ -1,6 +1,6 @@ // +build !windows -package binfmt_misc +package archutil import ( "bytes" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/check_windows.go b/vendor/github.com/moby/buildkit/util/archutil/check_windows.go similarity index 90% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/check_windows.go rename to vendor/github.com/moby/buildkit/util/archutil/check_windows.go index f246184778b62f2949447cca72c2f91b6b357d80..18ec34dbae3632d9660fd168f3afdca23b7767cc 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/check_windows.go +++ b/vendor/github.com/moby/buildkit/util/archutil/check_windows.go @@ -1,6 +1,6 @@ // +build windows -package binfmt_misc +package archutil import ( "errors" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go b/vendor/github.com/moby/buildkit/util/archutil/detect.go similarity index 90% rename from 
vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go rename to vendor/github.com/moby/buildkit/util/archutil/detect.go index 73eb405c6e8ba3ea4813ce02f0881cf813cbbab4..7b4673c6bab54a6b888fc24c97822bd9a88dac33 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/detect.go +++ b/vendor/github.com/moby/buildkit/util/archutil/detect.go @@ -1,4 +1,4 @@ -package binfmt_misc +package archutil import ( "strings" @@ -82,37 +82,37 @@ func WarnIfUnsupported(pfs []string) { if p != def { if p == "linux/amd64" { if err := amd64Supported(); err != nil { - printPlatfromWarning(p, err) + printPlatformWarning(p, err) } } if p == "linux/arm64" { if err := arm64Supported(); err != nil { - printPlatfromWarning(p, err) + printPlatformWarning(p, err) } } if p == "linux/riscv64" { if err := riscv64Supported(); err != nil { - printPlatfromWarning(p, err) + printPlatformWarning(p, err) } } if p == "linux/ppc64le" { if err := ppc64leSupported(); err != nil { - printPlatfromWarning(p, err) + printPlatformWarning(p, err) } } if p == "linux/s390x" { if err := s390xSupported(); err != nil { - printPlatfromWarning(p, err) + printPlatformWarning(p, err) } } if p == "linux/386" { if err := i386Supported(); err != nil { - printPlatfromWarning(p, err) + printPlatformWarning(p, err) } } if strings.HasPrefix(p, "linux/arm/v6") || strings.HasPrefix(p, "linux/arm/v7") { if err := armSupported(); err != nil { - printPlatfromWarning(p, err) + printPlatformWarning(p, err) } } } @@ -123,11 +123,11 @@ func defaultPlatform() string { return platforms.Format(platforms.Normalize(platforms.DefaultSpec())) } -func printPlatfromWarning(p string, err error) { +func printPlatformWarning(p string, err error) { if strings.Contains(err.Error(), "exec format error") { logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", p) } else if strings.Contains(err.Error(), "no such file or directory") { - logrus.Warnf("platforms %s cannot pass the validation, '-F' flag might have not set for 'binfmt_misc'.", p) + logrus.Warnf("platforms %s cannot pass the validation, '-F' flag might have not set for 'archutil'.", p) } else { logrus.Warnf("platforms %s cannot pass the validation: %s", p, err.Error()) } diff --git a/vendor/github.com/moby/buildkit/util/archutil/ppc64le_binary.go b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_binary.go new file mode 100644 index 0000000000000000000000000000000000000000..3e4d4b38e93c0dd1adc5bc6538fab2c048e9e781 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_binary.go @@ -0,0 +1,8 @@ +// +build !ppc64le + +package archutil + +// This file is generated by running make inside the archutil package. +// Do not edit manually. 
+ +const Binaryppc64le = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x06\x51\x06\x10\x6f\x03\x03\x83\x00\x88\xef\x00\x15\xbf\x01\x97\x07\x89\x59\x30\x30\x31\x38\x30\xb0\x30\x30\x83\xd5\xb2\x32\xa0\x00\x01\x64\x7a\x0f\x94\xb3\x07\x2e\x0d\xb1\x2b\xf0\x69\x49\x0a\x1b\x03\xf1\x40\x00\xa1\xdb\x82\x81\x21\xc1\x82\x89\x81\xc1\x85\x41\xaf\x38\xa3\xb8\xa4\xa8\x24\x31\x89\x41\xaf\x24\xb5\xa2\x84\x41\x2f\x35\x23\x3e\xad\x28\x31\x37\x95\x81\x62\xc0\x0d\x75\x29\xcc\x8d\xb0\xf0\xd8\x00\xe5\xf3\xa0\xa9\xe7\x40\xe3\x0b\x42\xf5\x33\x21\xfc\x2f\x80\x1a\x0e\xa8\x80\x05\x8d\x0f\xd2\xcb\x8c\x45\x1d\x4c\xbf\x34\x16\xf5\xc8\x00\x10\x00\x00\xff\xff\x59\x3e\xf6\x64\xd8\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check.go b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check.go similarity index 80% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check.go rename to vendor/github.com/moby/buildkit/util/archutil/ppc64le_check.go index 4d5b3bf877a18d430aa53d2bf9305f973e493475..017258390bc7d1d1879675bdb82c3895b8f307f8 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check.go @@ -1,6 +1,6 @@ // +build !ppc64le -package binfmt_misc +package archutil func ppc64leSupported() error { return check(Binaryppc64le) diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check_ppc64le.go b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check_ppc64le.go similarity index 76% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check_ppc64le.go rename to vendor/github.com/moby/buildkit/util/archutil/ppc64le_check_ppc64le.go index 27e4ab8f1a9343595fbea2a73ce365b05ec89c46..7da12d4b5ced30a18b9513eb3045d6b73a39ba01 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_check_ppc64le.go +++ b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check_ppc64le.go @@ -1,6 +1,6 @@ // +build ppc64le -package binfmt_misc +package archutil func ppc64leSupported() error { return nil diff --git a/vendor/github.com/moby/buildkit/util/archutil/riscv64_binary.go b/vendor/github.com/moby/buildkit/util/archutil/riscv64_binary.go new file mode 100644 index 0000000000000000000000000000000000000000..a5e115d6bdde3b6e046a3d174c4ec7bd8bda7a27 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/archutil/riscv64_binary.go @@ -0,0 +1,8 @@ +// +build !riscv64 + +package archutil + +// This file is generated by running make inside the archutil package. +// Do not edit manually. 
+ +const Binaryriscv64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x86\xcf\x0c\x20\xde\x06\x06\x88\x98\x03\x54\xfc\x02\x94\x66\x01\x8b\x59\x30\x30\x31\x38\x30\x30\x33\x30\x81\x55\xb1\x32\x20\x03\x46\x14\x7a\x0f\x94\x07\xa3\x19\x04\x20\x54\xe0\xd3\x92\x14\x36\x06\xe2\x01\x54\x1b\x83\x30\x2b\x03\xc3\x64\x8e\x0b\xac\xc5\x20\x8e\x5e\x71\x46\x71\x49\x51\x49\x62\x12\x83\x5e\x49\x6a\x45\x09\x03\x15\x00\x37\xd4\xe5\x30\xb7\xc1\xc2\x61\x03\x94\xcf\x83\xa6\x9e\x05\x8d\x0f\x52\xcd\x8c\xc5\x5c\x98\xff\x05\xb1\xa8\x47\x06\x80\x00\x00\x00\xff\xff\x34\x4f\x05\xf7\x90\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check.go b/vendor/github.com/moby/buildkit/util/archutil/riscv64_check.go similarity index 80% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check.go rename to vendor/github.com/moby/buildkit/util/archutil/riscv64_check.go index c2f31f7e36f75d9cea378b71a578c9564640ebf9..c1272d0a464589d784e7e182ee21e3f607283d3c 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/riscv64_check.go @@ -1,6 +1,6 @@ // +build !riscv64 -package binfmt_misc +package archutil func riscv64Supported() error { return check(Binaryriscv64) diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check_riscv64.go b/vendor/github.com/moby/buildkit/util/archutil/riscv64_check_riscv64.go similarity index 76% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check_riscv64.go rename to vendor/github.com/moby/buildkit/util/archutil/riscv64_check_riscv64.go index 7ae573609cc281a3a3cbcaf90d94e3cbbd248d67..ceeb56075bde297da18066b1d8352316e84ece8b 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_check_riscv64.go +++ b/vendor/github.com/moby/buildkit/util/archutil/riscv64_check_riscv64.go @@ -1,6 +1,6 @@ // +build riscv64 -package binfmt_misc +package archutil func riscv64Supported() error { return nil diff --git a/vendor/github.com/moby/buildkit/util/archutil/s390x_binary.go b/vendor/github.com/moby/buildkit/util/archutil/s390x_binary.go new file mode 100644 index 0000000000000000000000000000000000000000..0f43a0b89ff6008398ee4eae2b7f3a833e702821 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/archutil/s390x_binary.go @@ -0,0 +1,8 @@ +// +build !s390x + +package archutil + +// This file is generated by running make inside the archutil package. +// Do not edit manually. 
+ +const Binarys390x = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x62\x64\x80\x03\x26\x06\x31\x06\x06\x06\xb0\x00\x23\x03\xc3\x06\xa8\xa8\x03\x94\xbe\x00\xe5\x59\x30\x30\x31\x38\x30\x30\x33\x30\x41\xd5\xb2\x32\x20\x01\x46\x34\x9a\x81\x81\x61\x07\x2a\x2d\xc0\x90\x52\xf2\x34\x90\x81\x81\x81\x8d\x81\x34\x20\xb0\x5c\x93\x81\x81\x8b\x91\x9d\x9d\x41\xaf\x38\xa3\xb8\xa4\xa8\x24\x31\x89\x41\xaf\x24\xb5\xa2\x84\x81\x7a\x80\x1b\xc9\xe9\x6c\x68\xe1\x00\xa3\x39\xd0\xf4\xb0\xa0\x79\x9f\x19\x87\xd9\xb0\x70\x10\x44\x13\x87\x07\x15\x20\x00\x00\xff\xff\x28\x7b\x76\xee\x90\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check.go b/vendor/github.com/moby/buildkit/util/archutil/s390x_check.go similarity index 79% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check.go rename to vendor/github.com/moby/buildkit/util/archutil/s390x_check.go index 1d5b4a08c3dfd19167524d5e2365fcb30308b7df..89f5cf4343006d5a7acc3aa429a1cb2f20e17449 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/s390x_check.go @@ -1,6 +1,6 @@ // +build !s390x -package binfmt_misc +package archutil func s390xSupported() error { return check(Binarys390x) diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check_s390x.go b/vendor/github.com/moby/buildkit/util/archutil/s390x_check_s390x.go similarity index 75% rename from vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check_s390x.go rename to vendor/github.com/moby/buildkit/util/archutil/s390x_check_s390x.go index 92554221ba63e83a028cebc3b506319d9db2487e..664bb15e18f25ed1b08b977c0c810a751126c938 100644 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_check_s390x.go +++ b/vendor/github.com/moby/buildkit/util/archutil/s390x_check_s390x.go @@ -1,6 +1,6 @@ // +build s390x -package binfmt_misc +package archutil func s390xSupported() error { return nil diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/386_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/386_binary.go deleted file mode 100644 index 580f152f94476f827fb954a495814f262e089f90..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/386_binary.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !386 - -package binfmt_misc - -// This file is generated by running make inside the binfmt_misc package. -// Do not edit manually. 
- -const Binary386 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\xd8\x31\x6e\xc2\x30\x14\x06\xe0\xdf\x8d\xdb\x26\x6a\x07\x1f\x20\xaa\x3a\x74\xe8\x64\xb5\x52\xae\x00\x2c\x88\x8d\x03\x80\x14\xc1\x94\x44\x89\x91\x60\x22\x47\x60\xe0\x20\x8c\x8c\x5c\x80\x13\x70\x19\xf4\xe2\x67\x91\x81\x25\xfb\xfb\xa4\x5f\x16\xcf\xe6\x29\xeb\x7b\xfb\xd1\x74\xac\x94\x42\xf0\x82\x08\xdd\xaf\x83\x8e\x33\x00\x7f\xc6\xd7\x33\x7c\x23\xc2\x2f\x74\xb8\x27\xad\x8e\x29\x27\x00\x14\x4d\x35\x03\x7f\x6f\x7c\x0f\x4a\x02\x80\xf2\xca\x75\x7a\x77\xa4\xb4\x3a\xa6\xa4\x00\x52\xfe\x7f\xc8\x27\xbf\x9f\xcc\xe6\xd4\xef\x42\xb5\xc7\x57\x0a\x21\x84\x10\x42\x08\x21\x84\x10\x62\x88\x33\x0d\xd5\xff\xb7\x6b\x0b\xdb\xac\x1b\x57\xbb\xc5\x12\xb6\x28\x5d\x6e\x57\xc5\xc6\x56\x75\x59\xe5\xb5\xdb\xc1\xba\x7c\xeb\x86\xf4\xfd\x00\xf0\xde\xed\x13\x78\xce\xe7\x19\x3f\xd0\x7c\x7e\xf1\x5c\xff\xc6\x3b\x07\x18\xbf\x2b\x08\x54\xef\x8c\x7a\xf5\xc4\x00\x3f\x4f\xde\xdd\x03\x00\x00\xff\xff\x8d\xf7\xd2\x72\xd0\x10\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_binary.go deleted file mode 100644 index 3cafea92d41c3324783ab5fc9bcd9a20edd0bca3..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/amd64_binary.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !amd64 - -package binfmt_misc - -// This file is generated by running make inside the binfmt_misc package. -// Do not edit manually. - -const Binaryamd64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x06\x3b\x06\x30\x4f\xc0\x01\xcc\x77\x80\x8a\x1b\x08\xc0\x95\x30\x38\x30\x58\x30\x30\x33\x38\x30\xb0\x30\x30\x83\xd5\xb2\x30\x20\x03\x07\x14\x9a\x03\x6a\x34\x8c\x66\x80\x9a\x03\xe2\xb2\x22\xf1\x61\xf6\xc1\x68\x1e\xa8\x30\x8c\x86\xa9\x63\x81\xe2\x17\x50\xe1\x17\x50\x7b\x60\xb4\x02\x54\x1c\x46\x73\x30\x20\xf4\x09\x40\xed\x74\xf7\x0b\x05\xd9\x7f\x80\x05\xea\x8e\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\x14\x8c\x82\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\xb8\x03\x8f\xe3\x07\x6c\x40\x94\xe1\x7f\x7e\x56\x06\xbd\xe2\x8c\xe2\x92\xa2\x92\xc4\x24\x06\xbd\xbc\xfc\x92\x54\xbd\xf4\xbc\x52\xbd\x82\xa2\xfc\x82\xd4\xa2\x92\x4a\x06\xbd\x92\xd4\x8a\x12\x8a\xed\xe3\x66\x60\x60\x60\x07\x8f\x33\xa0\xf7\xdf\x51\xfb\xed\x0c\x68\xfd\x77\x18\x90\x83\xf6\xd9\xd9\x18\xd0\xc7\x0d\xd0\xc6\x0b\x18\x10\xe3\x0c\xe8\x7c\x66\x2c\xee\xe2\x81\xea\x57\x21\xa0\x1f\x10\x00\x00\xff\xff\x8a\x1b\xd7\x73\x30\x11\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_binary.go deleted file mode 100644 index 01cdf9a252b095270cc5111b51729f4efffa23d5..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm64_binary.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !arm64 - -package binfmt_misc - -// This file is generated by running make inside the binfmt_misc package. -// Do not edit manually. 
- -const Binaryarm64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x86\xed\x0c\x20\x5e\x05\x83\x03\x98\xef\x00\x15\x9f\xc1\x80\x00\x0e\x0c\x16\x0c\x8c\x0c\x0e\x0c\xcc\x0c\x4c\x60\xb5\xac\x0c\x0c\x28\xb2\xc8\x74\x0b\x94\xd7\x02\x97\x87\xd9\xd5\x70\x69\x05\x77\xc3\x25\x46\x06\x86\x2b\x0c\x7a\xc5\x19\xc5\x25\x45\x25\x89\x49\x0c\x7a\x25\xa9\x15\x25\x0c\x54\x00\xdc\x50\x9b\xd8\xa0\x7c\x98\x7f\x2a\xa0\x7c\x1e\x34\xf5\x2c\x68\x7c\x90\x5e\x66\x2c\xe6\xc2\xfc\x21\x88\x45\x3d\x32\x00\x04\x00\x00\xff\xff\xe7\x30\x54\x02\x58\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_binary.go deleted file mode 100644 index c780578b7b77851f2704264304f7c8f2dde3be02..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/arm_binary.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !arm - -package binfmt_misc - -// This file is generated by running make inside the binfmt_misc package. -// Do not edit manually. - -const Binaryarm = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\x8c\x8e\x31\x0e\x82\x40\x14\x44\xdf\x17\x50\x13\x6d\xf4\x04\xda\x51\x6d\xc5\x05\x28\xb4\xd2\xc6\x70\x00\x97\x84\x44\x3a\x02\xdf\xc4\xce\x4b\x78\x00\xee\xc6\x01\xbc\x82\x01\x17\xdd\xc2\xc2\x49\x26\x93\x7d\x3b\xc9\x9f\xfb\xee\xb0\x17\x11\x46\x4d\x88\xe9\x5f\x19\x42\x02\x3c\xde\x30\x4a\xd8\x20\xc4\x84\x04\x7c\xdb\x32\xf8\x0c\x83\xa3\x0f\x6b\x3b\xa9\xda\x0e\x78\xa6\x2b\xc0\x16\x36\x2f\x91\x19\x30\x17\x4c\x73\x69\xb4\x56\x9b\x63\xb4\xb8\x29\x26\x3d\x1d\x8d\x55\xad\xcb\xfc\xaa\x45\xc3\xdf\x5a\xb8\x6b\x53\xb7\x37\x03\x96\xde\x7f\xe8\xb2\x9f\x10\x40\x35\xf2\x7e\xeb\xda\xeb\x89\x97\x81\xc7\x6b\x60\xfb\xa3\xf7\x0a\x00\x00\xff\xff\x73\x8f\xca\xf1\x34\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_binary.go deleted file mode 100644 index 511db714c70af2c222b6ebe778d28ef4e800dfa2..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/ppc64le_binary.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !ppc64le - -package binfmt_misc - -// This file is generated by running make inside the binfmt_misc package. -// Do not edit manually. - -const Binaryppc64le = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x06\x51\x06\x10\xaf\x82\x81\x41\x00\xc4\x77\x80\x8a\x2f\x80\xcb\x83\xc4\x2c\x18\x18\x19\x1c\x18\x58\x18\x98\xc1\x6a\x59\x19\x50\x80\x00\x32\xdd\x02\xe5\xb4\xc0\xa5\x19\x61\xa4\x05\x03\x43\x82\x05\x13\x03\x83\x0b\x83\x5e\x71\x46\x71\x49\x51\x49\x62\x12\x83\x5e\x49\x6a\x45\x09\x83\x5e\x6a\x46\x7c\x5a\x51\x62\x6e\x2a\x03\xc5\x80\x1b\x6a\x23\x1b\x94\x0f\xf3\x57\x05\x94\xcf\x83\xa6\x9e\x03\x8d\x2f\x08\xd5\xcf\x84\xf0\x87\x00\xaa\x7f\x50\x01\x0b\x1a\x1f\xa4\x97\x19\x8b\x3a\x98\x7e\x69\x2c\xea\x91\x01\x20\x00\x00\xff\xff\xce\xf7\x15\x75\xa0\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_binary.go deleted file mode 100644 index 146618e301111268b03bad3ee0144888502b4450..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/riscv64_binary.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !riscv64 - -package binfmt_misc - -// This file is generated by running make inside the binfmt_misc package. -// Do not edit manually. 
- -const Binaryriscv64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x86\xcf\x0c\x20\x5e\x05\x03\x44\xcc\x01\x2a\x3e\x03\x4a\xb3\x80\xc5\x2c\x18\x18\x19\x1c\x18\x98\x19\x98\xc0\xaa\x58\x19\x90\x01\x23\x0a\xdd\x02\xe5\xc1\x68\x06\x01\x08\x25\xcc\xca\xc0\x30\x99\xe3\x02\x6b\x31\x88\xa3\x57\x9c\x51\x5c\x52\x54\x92\x98\xc4\xa0\x57\x92\x5a\x51\xc2\x40\x05\xc0\x0d\x75\x01\x1b\x94\x0f\xf3\x4f\x05\x94\xcf\x83\xa6\x9e\x05\x8d\x0f\x52\xcd\x8c\xc5\x5c\x98\x3f\x04\xb1\xa8\x47\x06\x80\x00\x00\x00\xff\xff\x39\x41\xdf\xa1\x58\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_binary.go b/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_binary.go deleted file mode 100644 index 3d34c2e5ace3618680a00692c0dbea2ed0ddc113..0000000000000000000000000000000000000000 --- a/vendor/github.com/moby/buildkit/util/binfmt_misc/s390x_binary.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !s390x - -package binfmt_misc - -// This file is generated by running make inside the binfmt_misc package. -// Do not edit manually. - -const Binarys390x = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x62\x64\x80\x03\x26\x06\x31\x06\x06\x06\xb0\x00\x23\x03\x43\x05\x54\xd4\x01\x4a\xcf\x80\xf2\x2c\x18\x18\x19\x1c\x18\x98\x19\x98\xa0\x6a\x59\x19\x90\x00\x23\x1a\xcd\xc0\xc0\xd0\x80\x4a\x0b\x30\x2c\xd7\x64\x60\xe0\x62\x64\x67\x67\xd0\x2b\xce\x28\x2e\x29\x2a\x49\x4c\x62\xd0\x2b\x49\xad\x28\x61\xa0\x1e\xe0\x46\x72\x02\x1b\x9a\x7f\x60\x34\x07\x9a\x1e\x16\x34\x6f\x30\xe3\x30\x1b\xe6\x1f\x41\x34\x71\xb8\x97\x01\x01\x00\x00\xff\xff\x0c\x76\x9a\xe1\x58\x01\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/compression/compression.go b/vendor/github.com/moby/buildkit/util/compression/compression.go new file mode 100644 index 0000000000000000000000000000000000000000..f63d2934bc6b74bfb68b2288ab3cffaababca05e --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/compression/compression.go @@ -0,0 +1,136 @@ +package compression + +import ( + "bytes" + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Type represents compression type for blob data. +type Type int + +const ( + // Uncompressed indicates no compression. + Uncompressed Type = iota + + // Gzip is used for blob data. + Gzip + + // UnknownCompression means not supported yet. + UnknownCompression Type = -1 +) + +var Default = Gzip + +func (ct Type) String() string { + switch ct { + case Uncompressed: + return "uncompressed" + case Gzip: + return "gzip" + default: + return "unknown" + } +} + +// DetectLayerMediaType returns media type from existing blob data. 
+func DetectLayerMediaType(ctx context.Context, cs content.Store, id digest.Digest, oci bool) (string, error) { + ra, err := cs.ReaderAt(ctx, ocispec.Descriptor{Digest: id}) + if err != nil { + return "", err + } + defer ra.Close() + + ct, err := detectCompressionType(content.NewReader(ra)) + if err != nil { + return "", err + } + + switch ct { + case Uncompressed: + if oci { + return ocispec.MediaTypeImageLayer, nil + } + return images.MediaTypeDockerSchema2Layer, nil + case Gzip: + if oci { + return ocispec.MediaTypeImageLayerGzip, nil + } + return images.MediaTypeDockerSchema2LayerGzip, nil + default: + return "", errors.Errorf("failed to detect layer %v compression type", id) + } +} + +// detectCompressionType detects compression type from real blob data. +func detectCompressionType(cr io.Reader) (Type, error) { + var buf [10]byte + var n int + var err error + + if n, err = cr.Read(buf[:]); err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some + // odd cases where the layer.tar file will be empty (zero bytes) + // and we'll just treat it as a non-compressed stream and that + // means just create an empty layer. + // + // See issue docker/docker#18170 + return UnknownCompression, err + } + + for c, m := range map[Type][]byte{ + Gzip: {0x1F, 0x8B, 0x08}, + } { + if n < len(m) { + continue + } + if bytes.Equal(m, buf[:len(m)]) { + return c, nil + } + } + return Uncompressed, nil +} + +var toDockerLayerType = map[string]string{ + ocispec.MediaTypeImageLayer: images.MediaTypeDockerSchema2Layer, + images.MediaTypeDockerSchema2Layer: images.MediaTypeDockerSchema2Layer, + ocispec.MediaTypeImageLayerGzip: images.MediaTypeDockerSchema2LayerGzip, + images.MediaTypeDockerSchema2LayerGzip: images.MediaTypeDockerSchema2LayerGzip, +} + +var toOCILayerType = map[string]string{ + ocispec.MediaTypeImageLayer: ocispec.MediaTypeImageLayer, + images.MediaTypeDockerSchema2Layer: ocispec.MediaTypeImageLayer, + ocispec.MediaTypeImageLayerGzip: ocispec.MediaTypeImageLayerGzip, + images.MediaTypeDockerSchema2LayerGzip: ocispec.MediaTypeImageLayerGzip, +} + +func convertLayerMediaType(mediaType string, oci bool) string { + var converted string + if oci { + converted = toOCILayerType[mediaType] + } else { + converted = toDockerLayerType[mediaType] + } + if converted == "" { + logrus.Warnf("unhandled conversion for mediatype %q", mediaType) + return mediaType + } + return converted +} + +func ConvertAllLayerMediaTypes(oci bool, descs ...ocispec.Descriptor) []ocispec.Descriptor { + var converted []ocispec.Descriptor + for _, desc := range descs { + desc.MediaType = convertLayerMediaType(desc.MediaType, oci) + converted = append(converted, desc) + } + return converted +} diff --git a/vendor/github.com/moby/buildkit/util/contentutil/refs.go b/vendor/github.com/moby/buildkit/util/contentutil/refs.go index e62d7987bd3fa92932a4783aa6dd1c98b8694ae1..9e41ea07d28b8ecfd31b298957db98657429dedb 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/refs.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/refs.go @@ -9,7 +9,7 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" - "github.com/docker/docker/pkg/locker" + "github.com/moby/locker" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" diff --git a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go 
b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go index 7c0fe84adbb85ad3e3e35e27c52c9a0d25f6d190..08e44083619e613474119aa00c23e4c6d534e8a4 100644 --- a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go +++ b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go @@ -154,7 +154,7 @@ func getFreeLoopID() (int, error) { } defer fd.Close() - const _LOOP_CTL_GET_FREE = 0x4C82 + const _LOOP_CTL_GET_FREE = 0x4C82 //nolint:golint r1, _, uerr := unix.Syscall(unix.SYS_IOCTL, fd.Fd(), _LOOP_CTL_GET_FREE, 0) if uerr == 0 { return int(r1), nil diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go index acba2750ca2776e8f1206c795995fbb00520c892..f12f10bc8a10804f396ad3127f35c6f6427cacd5 100644 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go +++ b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go @@ -1,11 +1,15 @@ package grpcerrors import ( + "encoding/json" + "errors" + + "github.com/containerd/typeurl" gogotypes "github.com/gogo/protobuf/types" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/any" "github.com/moby/buildkit/util/stack" + "github.com/sirupsen/logrus" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -29,8 +33,12 @@ func ToGRPC(err error) error { st = status.New(Code(err), err.Error()) } if st.Code() != Code(err) { + code := Code(err) + if code == codes.OK { + code = codes.Unknown + } pb := st.Proto() - pb.Code = int32(Code(err)) + pb.Code = int32(code) st = status.FromProto(pb) } @@ -47,7 +55,7 @@ func ToGRPC(err error) error { }) if len(details) > 0 { - if st2, err := st.WithDetails(details...); err == nil { + if st2, err := withDetails(st, details...); err == nil { st = st2 } } @@ -55,6 +63,26 @@ func ToGRPC(err error) error { return st.Err() } +func withDetails(s *status.Status, details ...proto.Message) (*status.Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + p := s.Proto() + for _, detail := range details { + url, err := typeurl.TypeURL(detail) + if err != nil { + logrus.Warnf("ignoring typed error %T: not registered", detail) + continue + } + dt, err := json.Marshal(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, &any.Any{TypeUrl: url, Value: dt}) + } + return status.FromProto(p), nil +} + func Code(err error) codes.Code { if se, ok := err.(interface { Code() codes.Code @@ -72,9 +100,10 @@ func Code(err error) codes.Code { Unwrap() error }) if ok { - return Code(wrapped.Unwrap()) + if err := wrapped.Unwrap(); err != nil { + return Code(err) + } } - return status.FromContextError(err).Code() } @@ -96,7 +125,9 @@ func AsGRPCStatus(err error) (*status.Status, bool) { Unwrap() error }) if ok { - return AsGRPCStatus(wrapped.Unwrap()) + if err := wrapped.Unwrap(); err != nil { + return AsGRPCStatus(err) + } } return nil, false @@ -123,17 +154,9 @@ func FromGRPC(err error) error { // details that we don't understand are copied as proto for _, d := range pb.Details { - var m interface{} - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(d, detail); err != nil { - detail := &gogotypes.DynamicAny{} - if err := gogotypes.UnmarshalAny(gogoAny(d), detail); err != nil { - n.Details = append(n.Details, d) - continue - } - m = detail.Message - } else { - m = 
detail.Message + m, err := typeurl.UnmarshalAny(gogoAny(d)) + if err != nil { + continue } switch v := m.(type) { @@ -144,7 +167,6 @@ func FromGRPC(err error) error { default: n.Details = append(n.Details, d) } - } err = status.FromProto(n).Err() @@ -159,6 +181,10 @@ func FromGRPC(err error) error { err = d.WrapError(err) } + if err != nil { + stack.Helper() + } + return stack.Enable(err) } @@ -167,6 +193,10 @@ type withCode struct { error } +func (e *withCode) Code() codes.Code { + return e.code +} + func (e *withCode) Unwrap() error { return e.error } diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go b/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go index 77618c1ceee1dd0f25579d438b0785172057b2d3..1c17e4c67d27483aa457099c1294509ae4866de4 100644 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go +++ b/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go @@ -2,27 +2,53 @@ package grpcerrors import ( "context" + "log" + "os" + "github.com/moby/buildkit/util/stack" + "github.com/pkg/errors" "google.golang.org/grpc" ) func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { resp, err = handler(ctx, req) + oldErr := err if err != nil { + stack.Helper() err = ToGRPC(err) } + if oldErr != nil && err == nil { + logErr := errors.Wrap(oldErr, "invalid grpc error conversion") + if os.Getenv("BUILDKIT_DEBUG_PANIC_ON_ERROR") == "1" { + panic(logErr) + } + log.Printf("%v", logErr) + err = oldErr + } + return resp, err } func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - return ToGRPC(handler(srv, ss)) + err := ToGRPC(handler(srv, ss)) + if err != nil { + stack.Helper() + } + return err } func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - return FromGRPC(invoker(ctx, method, req, reply, cc, opts...)) + err := FromGRPC(invoker(ctx, method, req, reply, cc, opts...)) + if err != nil { + stack.Helper() + } + return err } func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { s, err := streamer(ctx, desc, cc, method, opts...) + if err != nil { + stack.Helper() + } return s, ToGRPC(err) } diff --git a/vendor/github.com/moby/buildkit/util/imageutil/config.go b/vendor/github.com/moby/buildkit/util/imageutil/config.go index e0d9f1746ee4925a25a9df9d352ae6d1fb814e2e..80ed268f9e6cd33c555ee952e5ef8bbd4acad719 100644 --- a/vendor/github.com/moby/buildkit/util/imageutil/config.go +++ b/vendor/github.com/moby/buildkit/util/imageutil/config.go @@ -36,6 +36,12 @@ func CancelCacheLeases() { leasesMu.Unlock() } +func AddLease(f func(context.Context) error) { + leasesMu.Lock() + leasesF = append(leasesF, f) + leasesMu.Unlock() +} + func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *specs.Platform) (digest.Digest, []byte, error) { // TODO: fix buildkit to take interface instead of struct var platform platforms.MatchComparer @@ -57,9 +63,7 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co ctx = ctx2 defer func() { // this lease is not deleted to allow other components to access manifest/config from cache. 
It will be deleted after 5 min deadline or on pruning inactive builder - leasesMu.Lock() - leasesF = append(leasesF, done) - leasesMu.Unlock() + AddLease(done) }() } diff --git a/vendor/github.com/moby/buildkit/util/network/host.go b/vendor/github.com/moby/buildkit/util/network/host.go index dc58b1ce72e4319761a38f7552c3a3ab4d6d1813..09aefdfc253eaf2fe81f29ad1cc526ea2e742f6e 100644 --- a/vendor/github.com/moby/buildkit/util/network/host.go +++ b/vendor/github.com/moby/buildkit/util/network/host.go @@ -1,3 +1,5 @@ +// +build !windows + package network import ( @@ -19,8 +21,8 @@ func (h *host) New() (Namespace, error) { type hostNS struct { } -func (h *hostNS) Set(s *specs.Spec) { - oci.WithHostNamespace(specs.NetworkNamespace)(nil, nil, nil, s) +func (h *hostNS) Set(s *specs.Spec) error { + return oci.WithHostNamespace(specs.NetworkNamespace)(nil, nil, nil, s) } func (h *hostNS) Close() error { diff --git a/vendor/github.com/moby/buildkit/util/network/network.go b/vendor/github.com/moby/buildkit/util/network/network.go index 70b0cccad47d7385c0398fec2e3d8579900bace9..befeef0c751813f5d452245dfa316bee48d4c782 100644 --- a/vendor/github.com/moby/buildkit/util/network/network.go +++ b/vendor/github.com/moby/buildkit/util/network/network.go @@ -15,5 +15,5 @@ type Provider interface { type Namespace interface { io.Closer // Set the namespace on the spec - Set(*specs.Spec) + Set(*specs.Spec) error } diff --git a/vendor/github.com/moby/buildkit/util/network/none.go b/vendor/github.com/moby/buildkit/util/network/none.go index ebf1ebda941f28c43649c694057aa9f9203ad978..336ff68b91b8b4196696e1cce8803a8905eef129 100644 --- a/vendor/github.com/moby/buildkit/util/network/none.go +++ b/vendor/github.com/moby/buildkit/util/network/none.go @@ -18,7 +18,8 @@ func (h *none) New() (Namespace, error) { type noneNS struct { } -func (h *noneNS) Set(s *specs.Spec) { +func (h *noneNS) Set(s *specs.Spec) error { + return nil } func (h *noneNS) Close() error { diff --git a/vendor/github.com/moby/buildkit/util/progress/logs/logs.go b/vendor/github.com/moby/buildkit/util/progress/logs/logs.go index 54f6ff89657cf83e4406dfffd4c0a0b50b18b265..d54dfd3e52bc8e2332ae3a65a7f7ee9f09537992 100644 --- a/vendor/github.com/moby/buildkit/util/progress/logs/logs.go +++ b/vendor/github.com/moby/buildkit/util/progress/logs/logs.go @@ -2,15 +2,26 @@ package logs import ( "context" + "fmt" "io" + "math" "os" + "strconv" + "sync" + "time" "github.com/moby/buildkit/client" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/util/progress" "github.com/pkg/errors" + "github.com/tonistiigi/units" ) +var defaultMaxLogSize = 1024 * 1024 +var defaultMaxLogSpeed = 100 * 1024 // per second + +var configCheckOnce sync.Once + func NewLogStreams(ctx context.Context, printOutput bool) (io.WriteCloser, io.WriteCloser) { return newStreamWriter(ctx, 1, printOutput), newStreamWriter(ctx, 2, printOutput) } @@ -21,31 +32,92 @@ func newStreamWriter(ctx context.Context, stream int, printOutput bool) io.Write pw: pw, stream: stream, printOutput: printOutput, + created: time.Now(), } } type streamWriter struct { - pw progress.Writer - stream int - printOutput bool + pw progress.Writer + stream int + printOutput bool + created time.Time + size int + clipping bool + clipReasonSpeed bool } -func (sw *streamWriter) Write(dt []byte) (int, error) { - sw.pw.Write(identity.NewID(), client.VertexLog{ - Stream: sw.stream, - Data: append([]byte{}, dt...), +func (sw *streamWriter) checkLimit(n int) int { + configCheckOnce.Do(func() { + maxLogSize, err := 
strconv.ParseInt(os.Getenv("BUILDKIT_STEP_LOG_MAX_SIZE"), 10, 32) + if err == nil { + defaultMaxLogSize = int(maxLogSize) + } + maxLogSpeed, err := strconv.ParseInt(os.Getenv("BUILDKIT_STEP_LOG_MAX_SPEED"), 10, 32) + if err == nil { + defaultMaxLogSpeed = int(maxLogSpeed) + } }) - if sw.printOutput { - switch sw.stream { - case 1: - return os.Stdout.Write(dt) - case 2: - return os.Stderr.Write(dt) - default: - return 0, errors.Errorf("invalid stream %d", sw.stream) + + oldSize := sw.size + sw.size += n + + maxSize := -1 + if defaultMaxLogSpeed != -1 { + maxSize = int(math.Ceil(time.Since(sw.created).Seconds())) * defaultMaxLogSpeed + sw.clipReasonSpeed = true + } + if maxSize > defaultMaxLogSize { + maxSize = defaultMaxLogSize + sw.clipReasonSpeed = false + } + if maxSize < oldSize { + return 0 + } + + if maxSize != -1 { + if sw.size > maxSize { + return maxSize - oldSize + } + } + return n +} + +func (sw *streamWriter) clipLimitMessage() string { + if sw.clipReasonSpeed { + return fmt.Sprintf("%#g/s", units.Bytes(defaultMaxLogSpeed)) + } + return fmt.Sprintf("%#g", units.Bytes(defaultMaxLogSize)) +} + +func (sw *streamWriter) Write(dt []byte) (int, error) { + oldSize := len(dt) + dt = append([]byte{}, dt[:sw.checkLimit(len(dt))]...) + + if sw.clipping && oldSize == len(dt) { + sw.clipping = false + } + if !sw.clipping && oldSize != len(dt) { + dt = append(dt, []byte(fmt.Sprintf("\n[output clipped, log limit %s reached]\n", sw.clipLimitMessage()))...) + sw.clipping = true + } + + if len(dt) != 0 { + sw.pw.Write(identity.NewID(), client.VertexLog{ + Stream: sw.stream, + Data: dt, + }) + if sw.printOutput { + switch sw.stream { + case 1: + return os.Stdout.Write(dt) + case 2: + return os.Stderr.Write(dt) + default: + return 0, errors.Errorf("invalid stream %d", sw.stream) + } } } - return len(dt), nil + return oldSize, nil } func (sw *streamWriter) Close() error { diff --git a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go index 51989368ce5dea0eb65233bf1eddf31b5ca74894..1ce37ea210b30f1427a0c30f6cc99ad3d6a953c5 100644 --- a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go +++ b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go @@ -15,7 +15,6 @@ type MultiWriter struct { mu sync.Mutex items []*Progress writers map[rawProgressWriter]struct{} - done bool meta map[string]interface{} } diff --git a/vendor/github.com/moby/buildkit/util/progress/progress.go b/vendor/github.com/moby/buildkit/util/progress/progress.go index ffe3d88b55cddfe3f5dd6373f064bf955cb067a1..3ce212948c2d556310dddea0216637b3c1613dac 100644 --- a/vendor/github.com/moby/buildkit/util/progress/progress.go +++ b/vendor/github.com/moby/buildkit/util/progress/progress.go @@ -62,6 +62,11 @@ func WithMetadata(key string, val interface{}) WriterOption { } } +type Controller interface { + Start(context.Context) (context.Context, func(error)) + Status(id string, action string) func() +} + type Writer interface { Write(id string, value interface{}) error Close() error diff --git a/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go b/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go new file mode 100644 index 0000000000000000000000000000000000000000..f16a06998d200d476ef3fda582c8c81782abf788 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go @@ -0,0 +1,138 @@ +package pullprogress + +import ( + "context" + "io" + "time" + + "github.com/containerd/containerd/content" + 
"github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/remotes" + "github.com/moby/buildkit/util/progress" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type PullManager interface { + content.IngestManager + content.Manager +} + +type ProviderWithProgress struct { + Provider content.Provider + Manager PullManager +} + +func (p *ProviderWithProgress) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + ra, err := p.Provider.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(ctx) + doneCh := make(chan struct{}) + go trackProgress(ctx, desc, p.Manager, doneCh) + return readerAtWithCancel{ReaderAt: ra, cancel: cancel, doneCh: doneCh}, nil +} + +type readerAtWithCancel struct { + content.ReaderAt + cancel func() + doneCh <-chan struct{} +} + +func (ra readerAtWithCancel) Close() error { + ra.cancel() + select { + case <-ra.doneCh: + case <-time.After(time.Second): + logrus.Warn("timeout waiting for pull progress to complete") + } + return ra.ReaderAt.Close() +} + +type FetcherWithProgress struct { + Fetcher remotes.Fetcher + Manager PullManager +} + +func (f *FetcherWithProgress) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + rc, err := f.Fetcher.Fetch(ctx, desc) + if err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(ctx) + doneCh := make(chan struct{}) + go trackProgress(ctx, desc, f.Manager, doneCh) + return readerWithCancel{ReadCloser: rc, cancel: cancel, doneCh: doneCh}, nil +} + +type readerWithCancel struct { + io.ReadCloser + cancel func() + doneCh <-chan struct{} +} + +func (r readerWithCancel) Close() error { + r.cancel() + select { + case <-r.doneCh: + case <-time.After(time.Second): + logrus.Warn("timeout waiting for pull progress to complete") + } + return r.ReadCloser.Close() +} + +func trackProgress(ctx context.Context, desc ocispec.Descriptor, manager PullManager, doneCh chan<- struct{}) { + + defer close(doneCh) + + ticker := time.NewTicker(150 * time.Millisecond) + defer ticker.Stop() + go func() { + <-ctx.Done() + ticker.Stop() + }() + + pw, _, _ := progress.FromContext(ctx) + defer pw.Close() + + ingestRef := remotes.MakeRefKey(ctx, desc) + + started := time.Now() + onFinalStatus := false + for !onFinalStatus { + select { + case <-ctx.Done(): + onFinalStatus = true + case <-ticker.C: + } + + status, err := manager.Status(ctx, ingestRef) + if err == nil { + pw.Write(desc.Digest.String(), progress.Status{ + Current: int(status.Offset), + Total: int(status.Total), + Started: &started, + }) + continue + } else if !errors.Is(err, errdefs.ErrNotFound) { + logrus.Errorf("unexpected error getting ingest status of %q: %v", ingestRef, err) + return + } + + info, err := manager.Info(ctx, desc.Digest) + if err == nil { + pw.Write(desc.Digest.String(), progress.Status{ + Current: int(info.Size), + Total: int(info.Size), + Started: &started, + Completed: &info.CreatedAt, + }) + return + } + + } +} diff --git a/vendor/github.com/moby/buildkit/util/resolver/authorizer.go b/vendor/github.com/moby/buildkit/util/resolver/authorizer.go new file mode 100644 index 0000000000000000000000000000000000000000..80e2a35191f70090f0ff09d4000c1bb93c697a2b --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/resolver/authorizer.go @@ -0,0 +1,489 @@ +package resolver + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "sort" + "strings" + "sync" + "time" + + 
"github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/containerd/remotes/docker/auth" + remoteserrors "github.com/containerd/containerd/remotes/errors" + "github.com/moby/buildkit/session" + sessionauth "github.com/moby/buildkit/session/auth" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type authHandlerNS struct { + counter int64 // needs to be 64bit aligned for 32bit systems + + mu sync.Mutex + handlers map[string]*authHandler + hosts map[string][]docker.RegistryHost + sm *session.Manager + g flightcontrol.Group +} + +func newAuthHandlerNS(sm *session.Manager) *authHandlerNS { + return &authHandlerNS{ + handlers: map[string]*authHandler{}, + hosts: map[string][]docker.RegistryHost{}, + sm: sm, + } +} + +func (a *authHandlerNS) get(host string, sm *session.Manager, g session.Group) *authHandler { + if g != nil { + if iter := g.SessionIterator(); iter != nil { + for { + id := iter.NextSession() + if id == "" { + break + } + h, ok := a.handlers[host+"/"+id] + if ok { + h.lastUsed = time.Now() + return h + } + } + } + } + + // link another handler + for k, h := range a.handlers { + parts := strings.SplitN(k, "/", 2) + if len(parts) != 2 { + continue + } + if parts[0] == host { + if h.authority != nil { + session, ok, err := sessionauth.VerifyTokenAuthority(host, h.authority, sm, g) + if err == nil && ok { + a.handlers[host+"/"+session] = h + h.lastUsed = time.Now() + return h + } + } else { + session, username, password, err := sessionauth.CredentialsFunc(sm, g)(host) + if err == nil { + if username == h.common.Username && password == h.common.Secret { + a.handlers[host+"/"+session] = h + h.lastUsed = time.Now() + return h + } + } + } + } + } + + return nil +} + +func (a *authHandlerNS) set(host, session string, h *authHandler) { + a.handlers[host+"/"+session] = h +} + +func (a *authHandlerNS) delete(h *authHandler) { + for k, v := range a.handlers { + if v == h { + delete(a.handlers, k) + } + } +} + +type dockerAuthorizer struct { + client *http.Client + + sm *session.Manager + session session.Group + handlers *authHandlerNS +} + +func newDockerAuthorizer(client *http.Client, handlers *authHandlerNS, sm *session.Manager, group session.Group) *dockerAuthorizer { + return &dockerAuthorizer{ + client: client, + handlers: handlers, + sm: sm, + session: group, + } +} + +// Authorize handles auth request. 
+func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { + a.handlers.mu.Lock() + defer a.handlers.mu.Unlock() + + // skip if there is no auth handler + ah := a.handlers.get(req.URL.Host, a.sm, a.session) + if ah == nil { + return nil + } + + auth, err := ah.authorize(ctx, a.sm, a.session) + if err != nil { + return err + } + + req.Header.Set("Authorization", auth) + return nil +} + +func (a *dockerAuthorizer) getCredentials(host string) (sessionID, username, secret string, err error) { + return sessionauth.CredentialsFunc(a.sm, a.session)(host) +} + +func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { + a.handlers.mu.Lock() + defer a.handlers.mu.Unlock() + + last := responses[len(responses)-1] + host := last.Request.URL.Host + + handler := a.handlers.get(host, a.sm, a.session) + + for _, c := range auth.ParseAuthHeader(last.Header) { + if c.Scheme == auth.BearerAuth { + var oldScopes []string + if err := invalidAuthorization(c, responses); err != nil { + a.handlers.delete(handler) + + if handler != nil { + oldScopes = handler.common.Scopes + } + handler = nil + + // this hacky way seems to be best method to detect that error is fatal and should not be retried with a new token + if c.Parameters["error"] == "insufficient_scope" && parseScopes(oldScopes).contains(parseScopes(strings.Split(c.Parameters["scope"], " "))) { + return err + } + } + + // reuse existing handler + // + // assume that one registry will return the common + // challenge information, including realm and service. + // and the resource scope is only different part + // which can be provided by each request. + if handler != nil { + return nil + } + + var username, secret string + session, pubKey, err := sessionauth.GetTokenAuthority(host, a.sm, a.session) + if err != nil { + return err + } + if pubKey == nil { + session, username, secret, err = a.getCredentials(host) + if err != nil { + return err + } + } + + common, err := auth.GenerateTokenOptions(ctx, host, username, secret, c) + if err != nil { + return err + } + common.Scopes = parseScopes(append(common.Scopes, oldScopes...)).normalize() + + a.handlers.set(host, session, newAuthHandler(host, a.client, c.Scheme, pubKey, common)) + + return nil + } else if c.Scheme == auth.BasicAuth { + session, username, secret, err := a.getCredentials(host) + if err != nil { + return err + } + + if username != "" && secret != "" { + common := auth.TokenOptions{ + Username: username, + Secret: secret, + } + + a.handlers.set(host, session, newAuthHandler(host, a.client, c.Scheme, nil, common)) + + return nil + } + } + } + return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") +} + +// authResult is used to control limit rate. +type authResult struct { + sync.WaitGroup + token string + err error + expires time.Time +} + +// authHandler is used to handle auth request per registry server. 
+type authHandler struct { + sync.Mutex + + client *http.Client + + // only support basic and bearer schemes + scheme auth.AuthenticationScheme + + // common contains common challenge answer + common auth.TokenOptions + + // scopedTokens caches token indexed by scopes, which used in + // bearer auth case + scopedTokens map[string]*authResult + + lastUsed time.Time + + host string + + authority *[32]byte +} + +func newAuthHandler(host string, client *http.Client, scheme auth.AuthenticationScheme, authority *[32]byte, opts auth.TokenOptions) *authHandler { + return &authHandler{ + host: host, + client: client, + scheme: scheme, + common: opts, + scopedTokens: map[string]*authResult{}, + lastUsed: time.Now(), + authority: authority, + } +} + +func (ah *authHandler) authorize(ctx context.Context, sm *session.Manager, g session.Group) (string, error) { + switch ah.scheme { + case auth.BasicAuth: + return ah.doBasicAuth(ctx) + case auth.BearerAuth: + return ah.doBearerAuth(ctx, sm, g) + default: + return "", errors.Wrapf(errdefs.ErrNotImplemented, "failed to find supported auth scheme: %s", string(ah.scheme)) + } +} + +func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { + username, secret := ah.common.Username, ah.common.Secret + + if username == "" || secret == "" { + return "", fmt.Errorf("failed to handle basic auth because missing username or secret") + } + + auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) + return fmt.Sprintf("Basic %s", auth), nil +} + +func (ah *authHandler) doBearerAuth(ctx context.Context, sm *session.Manager, g session.Group) (token string, err error) { + // copy common tokenOptions + to := ah.common + + to.Scopes = parseScopes(docker.GetTokenScopes(ctx, to.Scopes)).normalize() + + // Docs: https://docs.docker.com/registry/spec/auth/scope + scoped := strings.Join(to.Scopes, " ") + + ah.Lock() + for { + r, exist := ah.scopedTokens[scoped] + if !exist { + // no entry cached + break + } + ah.Unlock() + r.Wait() + if r.err != nil { + select { + case <-ctx.Done(): + return "", r.err + default: + } + } + if !errors.Is(r.err, context.Canceled) && + (r.expires.IsZero() || r.expires.After(time.Now())) { + return r.token, r.err + } + // r.err is canceled or token expired. 
Get rid of it and try again + ah.Lock() + r2, exist := ah.scopedTokens[scoped] + if exist && r == r2 { + delete(ah.scopedTokens, scoped) + } + } + + // only one fetch token job + r := new(authResult) + r.Add(1) + ah.scopedTokens[scoped] = r + ah.Unlock() + + var issuedAt time.Time + var expires int + defer func() { + token = fmt.Sprintf("Bearer %s", token) + r.token, r.err = token, err + if err == nil { + if issuedAt.IsZero() { + issuedAt = time.Now() + } + if exp := issuedAt.Add(time.Duration(float64(expires)*0.9) * time.Second); time.Now().Before(exp) { + r.expires = exp + } + } + r.Done() + }() + + if ah.authority != nil { + resp, err := sessionauth.FetchToken(&sessionauth.FetchTokenRequest{ + ClientID: "buildkit-client", + Host: ah.host, + Realm: to.Realm, + Service: to.Service, + Scopes: to.Scopes, + }, sm, g) + if err != nil { + return "", err + } + issuedAt, expires = time.Unix(resp.IssuedAt, 0), int(resp.ExpiresIn) + return resp.Token, nil + } + + // fetch token for the resource scope + if to.Secret != "" { + defer func() { + err = errors.Wrap(err, "failed to fetch oauth token") + }() + // try GET first because Docker Hub does not support POST + // switch once support has landed + resp, err := auth.FetchToken(ctx, ah.client, nil, to) + if err != nil { + var errStatus remoteserrors.ErrUnexpectedStatus + if errors.As(err, &errStatus) { + // retry with POST request + // As of September 2017, GCR is known to return 404. + // As of February 2018, JFrog Artifactory is known to return 401. + if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 { + resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, nil, "buildkit-client", to) + if err != nil { + return "", err + } + issuedAt, expires = resp.IssuedAt, resp.ExpiresIn + return resp.AccessToken, nil + } + log.G(ctx).WithFields(logrus.Fields{ + "status": errStatus.Status, + "body": string(errStatus.Body), + }).Debugf("token request failed") + } + return "", err + } + issuedAt, expires = resp.IssuedAt, resp.ExpiresIn + return resp.Token, nil + } + // do request anonymously + resp, err := auth.FetchToken(ctx, ah.client, nil, to) + if err != nil { + return "", errors.Wrap(err, "failed to fetch anonymous token") + } + issuedAt, expires = resp.IssuedAt, resp.ExpiresIn + + return resp.Token, nil +} + +func invalidAuthorization(c auth.Challenge, responses []*http.Response) error { + errStr := c.Parameters["error"] + if errStr == "" { + return nil + } + + n := len(responses) + if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) { + return nil + } + + return errors.Wrapf(docker.ErrInvalidAuthorization, "server message: %s", errStr) +} + +func sameRequest(r1, r2 *http.Request) bool { + if r1.Method != r2.Method { + return false + } + if *r1.URL != *r2.URL { + return false + } + return true +} + +type scopes map[string]map[string]struct{} + +func parseScopes(s []string) scopes { + // https://docs.docker.com/registry/spec/auth/scope/ + m := map[string]map[string]struct{}{} + for _, scope := range s { + parts := strings.SplitN(scope, ":", 3) + names := []string{parts[0]} + if len(parts) > 1 { + names = append(names, parts[1]) + } + var actions []string + if len(parts) == 3 { + actions = append(actions, strings.Split(parts[2], ",")...) 
+ } + name := strings.Join(names, ":") + ma, ok := m[name] + if !ok { + ma = map[string]struct{}{} + m[name] = ma + } + + for _, a := range actions { + ma[a] = struct{}{} + } + } + return m +} + +func (s scopes) normalize() []string { + names := make([]string, 0, len(s)) + for n := range s { + names = append(names, n) + } + sort.Strings(names) + + out := make([]string, 0, len(s)) + + for _, n := range names { + actions := make([]string, 0, len(s[n])) + for a := range s[n] { + actions = append(actions, a) + } + sort.Strings(actions) + + out = append(out, n+":"+strings.Join(actions, ",")) + } + return out +} + +func (s scopes) contains(s2 scopes) bool { + for n := range s2 { + v, ok := s[n] + if !ok { + return false + } + for a := range s2[n] { + if _, ok := v[a]; !ok { + return false + } + } + } + return true +} diff --git a/vendor/github.com/moby/buildkit/util/resolver/pool.go b/vendor/github.com/moby/buildkit/util/resolver/pool.go new file mode 100644 index 0000000000000000000000000000000000000000..20344eaa35f105d9be98d37e013a41c9e153898a --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/resolver/pool.go @@ -0,0 +1,206 @@ +package resolver + +import ( + "context" + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + distreference "github.com/docker/distribution/reference" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/source" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultPool is the default shared resolver pool instance +var DefaultPool = NewPool() + +// Pool is a cache of recently used resolvers +type Pool struct { + mu sync.Mutex + m map[string]*authHandlerNS +} + +// NewPool creates a new pool for caching resolvers +func NewPool() *Pool { + p := &Pool{ + m: map[string]*authHandlerNS{}, + } + time.AfterFunc(5*time.Minute, p.gc) + return p +} + +func (p *Pool) gc() { + p.mu.Lock() + defer p.mu.Unlock() + + for k, ns := range p.m { + ns.mu.Lock() + for key, h := range ns.handlers { + if time.Since(h.lastUsed) < 10*time.Minute { + continue + } + parts := strings.SplitN(key, "/", 2) + if len(parts) != 2 { + delete(ns.handlers, key) + continue + } + c, err := ns.sm.Get(context.TODO(), parts[1], true) + if c == nil || err != nil { + delete(ns.handlers, key) + } + } + if len(ns.handlers) == 0 { + delete(p.m, k) + } + ns.mu.Unlock() + } + + time.AfterFunc(5*time.Minute, p.gc) +} + +// Clear deletes currently cached items. This may be called on config changes for example. 
+func (p *Pool) Clear() { + p.mu.Lock() + defer p.mu.Unlock() + p.m = map[string]*authHandlerNS{} +} + +// GetResolver gets a resolver for a specified scope from the pool +func (p *Pool) GetResolver(hosts docker.RegistryHosts, ref, scope string, sm *session.Manager, g session.Group) *Resolver { + name := ref + named, err := distreference.ParseNormalizedNamed(ref) + if err == nil { + name = named.Name() + } + + key := fmt.Sprintf("%s::%s", name, scope) + + p.mu.Lock() + defer p.mu.Unlock() + h, ok := p.m[key] + if !ok { + h = newAuthHandlerNS(sm) + p.m[key] = h + } + return newResolver(hosts, h, sm, g) +} + +func newResolver(hosts docker.RegistryHosts, handler *authHandlerNS, sm *session.Manager, g session.Group) *Resolver { + if hosts == nil { + hosts = docker.ConfigureDefaultRegistries( + docker.WithClient(newDefaultClient()), + docker.WithPlainHTTP(docker.MatchLocalhost), + ) + } + r := &Resolver{ + hosts: hosts, + sm: sm, + g: g, + handler: handler, + } + r.Resolver = docker.NewResolver(docker.ResolverOptions{ + Hosts: r.hostsFunc, + }) + return r +} + +// Resolver is a wrapper around remotes.Resolver +type Resolver struct { + remotes.Resolver + hosts docker.RegistryHosts + sm *session.Manager + g session.Group + handler *authHandlerNS + auth *dockerAuthorizer + + is images.Store + mode source.ResolveMode +} + +func (r *Resolver) hostsFunc(host string) ([]docker.RegistryHost, error) { + return func(domain string) ([]docker.RegistryHost, error) { + v, err := r.handler.g.Do(context.TODO(), domain, func(ctx context.Context) (interface{}, error) { + // long lock not needed because flightcontrol.Do + r.handler.mu.Lock() + v, ok := r.handler.hosts[domain] + r.handler.mu.Unlock() + if ok { + return v, nil + } + res, err := r.hosts(domain) + if err != nil { + return nil, err + } + r.handler.mu.Lock() + r.handler.hosts[domain] = res + r.handler.mu.Unlock() + return res, nil + }) + if err != nil || v == nil { + return nil, err + } + res := v.([]docker.RegistryHost) + if len(res) == 0 { + return nil, nil + } + auth := newDockerAuthorizer(res[0].Client, r.handler, r.sm, r.g) + for i := range res { + res[i].Authorizer = auth + } + return res, nil + }(host) +} + +// WithSession returns a new resolver that works with new session group +func (r *Resolver) WithSession(s session.Group) *Resolver { + r2 := *r + r2.auth = nil + r2.g = s + return &r2 +} + +// WithImageStore returns new resolver that can also resolve from local images store +func (r *Resolver) WithImageStore(is images.Store, mode source.ResolveMode) *Resolver { + r2 := *r + r2.Resolver = r.Resolver + r2.is = is + r2.mode = mode + return &r2 +} + +// Fetcher returns a new fetcher for the provided reference. +func (r *Resolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { + if atomic.LoadInt64(&r.handler.counter) == 0 { + r.Resolve(ctx, ref) + } + return r.Resolver.Fetcher(ctx, ref) +} + +// Resolve attempts to resolve the reference into a name and descriptor. 
+func (r *Resolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { + if r.mode == source.ResolveModePreferLocal && r.is != nil { + if img, err := r.is.Get(ctx, ref); err == nil { + return ref, img.Target, nil + } + } + + n, desc, err := r.Resolver.Resolve(ctx, ref) + if err == nil { + atomic.AddInt64(&r.handler.counter, 1) + return n, desc, err + } + + if r.mode == source.ResolveModeDefault && r.is != nil { + if img, err := r.is.Get(ctx, ref); err == nil { + return ref, img.Target, nil + } + } + + return "", ocispec.Descriptor{}, err +} diff --git a/vendor/github.com/moby/buildkit/util/resolver/resolver.go b/vendor/github.com/moby/buildkit/util/resolver/resolver.go index e7b3c97ea47d847e02b4c52249dc3f95bfd58eab..42c940b3d60ad01ffbd50c953290ca1705abfcce 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/resolver.go +++ b/vendor/github.com/moby/buildkit/util/resolver/resolver.go @@ -10,41 +10,59 @@ import ( "path/filepath" "runtime" "strings" - "sync" "time" - "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/moby/buildkit/cmd/buildkitd/config" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/auth" "github.com/moby/buildkit/util/tracing" "github.com/pkg/errors" ) -func fillInsecureOpts(host string, c config.RegistryConfig, h *docker.RegistryHost) error { +func fillInsecureOpts(host string, c config.RegistryConfig, h docker.RegistryHost) ([]docker.RegistryHost, error) { + var hosts []docker.RegistryHost + tc, err := loadTLSConfig(c) if err != nil { - return err + return nil, err } + var isHTTP bool if c.PlainHTTP != nil && *c.PlainHTTP { - h.Scheme = "http" - } else if c.Insecure != nil && *c.Insecure { - tc.InsecureSkipVerify = true - } else if c.PlainHTTP == nil { + isHTTP = true + } + if c.PlainHTTP == nil { if ok, _ := docker.MatchLocalhost(host); ok { - h.Scheme = "http" + isHTTP = true + } + } + + if isHTTP { + h2 := h + h2.Scheme = "http" + hosts = append(hosts, h2) + } + if c.Insecure != nil && *c.Insecure { + h2 := h + transport := newDefaultTransport() + transport.TLSClientConfig = tc + h2.Client = &http.Client{ + Transport: tracing.NewTransport(transport), } + tc.InsecureSkipVerify = true + hosts = append(hosts, h2) } - transport := newDefaultTransport() - transport.TLSClientConfig = tc + if len(hosts) == 0 { + transport := newDefaultTransport() + transport.TLSClientConfig = tc - h.Client = &http.Client{ - Transport: tracing.NewTransport(transport), + h.Client = &http.Client{ + Transport: tracing.NewTransport(transport), + } + hosts = append(hosts, h) } - return nil + + return hosts, nil } func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) { @@ -97,6 +115,7 @@ func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) { return tc, nil } +// NewRegistryConfig converts registry config to docker.RegistryHosts callback func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts { return docker.Registries( func(host string) ([]docker.RegistryHost, error) { @@ -116,11 +135,12 @@ func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve, } - if err := fillInsecureOpts(mirror, m[mirror], &h); err != nil { + hosts, err := fillInsecureOpts(mirror, m[mirror], h) + if err != nil { return nil, err } - out = append(out, h) + out = append(out, hosts...) 
} if host == "docker.io" { @@ -135,11 +155,12 @@ func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts Capabilities: docker.HostCapabilityPush | docker.HostCapabilityPull | docker.HostCapabilityResolve, } - if err := fillInsecureOpts(host, c, &h); err != nil { + hosts, err := fillInsecureOpts(host, c, h) + if err != nil { return nil, err } - out = append(out, h) + out = append(out, hosts...) return out, nil }, docker.ConfigureDefaultRegistries( @@ -149,92 +170,9 @@ func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts ) } -type SessionAuthenticator struct { - sm *session.Manager - groups []session.Group - mu sync.RWMutex - cache map[string]credentials - cacheMu sync.RWMutex -} - -type credentials struct { - user string - secret string - created time.Time -} - -func NewSessionAuthenticator(sm *session.Manager, g session.Group) *SessionAuthenticator { - return &SessionAuthenticator{sm: sm, groups: []session.Group{g}, cache: map[string]credentials{}} -} - -func (a *SessionAuthenticator) credentials(h string) (string, string, error) { - const credentialsTimeout = time.Minute - - a.cacheMu.RLock() - c, ok := a.cache[h] - if ok && time.Since(c.created) < credentialsTimeout { - a.cacheMu.RUnlock() - return c.user, c.secret, nil - } - a.cacheMu.RUnlock() - - a.mu.RLock() - defer a.mu.RUnlock() - - var err error - for i := len(a.groups) - 1; i >= 0; i-- { - var user, secret string - user, secret, err = auth.CredentialsFunc(a.sm, a.groups[i])(h) - if err != nil { - continue - } - a.cacheMu.Lock() - a.cache[h] = credentials{user: user, secret: secret, created: time.Now()} - a.cacheMu.Unlock() - return user, secret, nil - } - return "", "", err -} - -func (a *SessionAuthenticator) AddSession(g session.Group) { - a.mu.Lock() - a.groups = append(a.groups, g) - a.mu.Unlock() -} - -func New(hosts docker.RegistryHosts, auth *SessionAuthenticator) remotes.Resolver { - return docker.NewResolver(docker.ResolverOptions{ - Hosts: hostsWithCredentials(hosts, auth), - }) -} - -func hostsWithCredentials(hosts docker.RegistryHosts, auth *SessionAuthenticator) docker.RegistryHosts { - if hosts == nil { - return nil - } - return func(domain string) ([]docker.RegistryHost, error) { - res, err := hosts(domain) - if err != nil { - return nil, err - } - if len(res) == 0 { - return nil, nil - } - - a := docker.NewDockerAuthorizer( - docker.WithAuthClient(res[0].Client), - docker.WithAuthCreds(auth.credentials), - ) - for i := range res { - res[i].Authorizer = a - } - return res, nil - } -} - func newDefaultClient() *http.Client { return &http.Client{ - Transport: newDefaultTransport(), + Transport: tracing.NewTransport(newDefaultTransport()), } } @@ -250,14 +188,12 @@ func newDefaultTransport() *http.Transport { Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, + KeepAlive: 60 * time.Second, }).DialContext, MaxIdleConns: 10, IdleConnTimeout: 30 * time.Second, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 5 * time.Second, - DisableKeepAlives: true, TLSNextProto: make(map[string]func(authority string, c *tls.Conn) http.RoundTripper), } } diff --git a/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_nonlinux.go b/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_nonlinux.go new file mode 100644 index 0000000000000000000000000000000000000000..bb08ec1945aa6c4014804155b9bde8574db199e3 --- /dev/null +++ 
b/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_nonlinux.go @@ -0,0 +1,19 @@ +// +build !linux + +package specconv + +import ( + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// ToRootless converts spec to be compatible with "rootless" runc. +// * Remove /sys mount +// * Remove cgroups +// +// See docs/rootless.md for the supported runc revision. +func ToRootless(spec *specs.Spec) error { + return errors.Errorf("not implemented on %s", runtime.GOOS) +} diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.go b/vendor/github.com/moby/buildkit/util/stack/stack.go index 6d3cfc833232493aeab8495a9e9368882ab4cf68..3409ac047af03b70094b78bcd575b3cdc79ca678 100644 --- a/vendor/github.com/moby/buildkit/util/stack/stack.go +++ b/vendor/github.com/moby/buildkit/util/stack/stack.go @@ -4,12 +4,24 @@ import ( "fmt" io "io" "os" + "runtime" "strconv" "strings" + "sync" + "github.com/containerd/typeurl" "github.com/pkg/errors" ) +var helpers map[string]struct{} +var helpersMu sync.RWMutex + +func init() { + typeurl.Register((*Stack)(nil), "github.com/moby/buildkit", "stack.Stack+json") + + helpers = map[string]struct{}{} +} + var version string var revision string @@ -18,6 +30,19 @@ func SetVersionInfo(v, r string) { revision = r } +func Helper() { + var pc [1]uintptr + n := runtime.Callers(2, pc[:]) + if n == 0 { + return + } + frames := runtime.CallersFrames(pc[:n]) + frame, _ := frames.Next() + helpersMu.Lock() + helpers[frame.Function] = struct{}{} + helpersMu.Unlock() +} + func Traces(err error) []*Stack { var st []*Stack @@ -47,6 +72,7 @@ func Enable(err error) error { if err == nil { return nil } + Helper() if !hasLocalStackTrace(err) { return errors.WithStack(err) } @@ -107,6 +133,8 @@ func (w *formatter) Format(s fmt.State, verb rune) { func convertStack(s errors.StackTrace) *Stack { var out Stack + helpersMu.RLock() + defer helpersMu.RUnlock() for _, f := range s { dt, err := f.MarshalText() if err != nil { @@ -116,6 +144,9 @@ func convertStack(s errors.StackTrace) *Stack { if len(p) != 2 { continue } + if _, ok := helpers[p[0]]; ok { + continue + } idx := strings.LastIndexByte(p[1], ':') if idx == -1 { continue diff --git a/vendor/github.com/moby/buildkit/util/system/path.go b/vendor/github.com/moby/buildkit/util/system/path.go new file mode 100644 index 0000000000000000000000000000000000000000..f6dc70dc8dd5672d3b69ea55f1d98b91f25307ff --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/system/path.go @@ -0,0 +1,18 @@ +package system + +// DefaultPathEnvUnix is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnvUnix = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnvWindows is windows style list of directories to search for +// executables. Each directory is separated from the next by a semicolon +// ';' character . 
+const DefaultPathEnvWindows = "c:\\Windows\\System32;c:\\Windows" + +func DefaultPathEnv(os string) string { + if os == "windows" { + return DefaultPathEnvWindows + } + return DefaultPathEnvUnix +} diff --git a/vendor/github.com/moby/buildkit/util/system/path_unix.go b/vendor/github.com/moby/buildkit/util/system/path_unix.go index c607c4db09f20c9ddea7a687afe5fa81322bdd42..f3762e69d36ad8c4f0e642e3769b28293a2d35d3 100644 --- a/vendor/github.com/moby/buildkit/util/system/path_unix.go +++ b/vendor/github.com/moby/buildkit/util/system/path_unix.go @@ -2,11 +2,6 @@ package system -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - // CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, // is the system drive. This is a no-op on Linux. func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go index cbfe2c1576ceb371fb3eed84895b49b0feb83beb..3fc47449484e8c64b448747180ff7fb3412795e6 100644 --- a/vendor/github.com/moby/buildkit/util/system/path_windows.go +++ b/vendor/github.com/moby/buildkit/util/system/path_windows.go @@ -8,10 +8,6 @@ import ( "strings" ) -// DefaultPathEnv is deliberately empty on Windows as the default path will be set by -// the container. Docker has no context of what the default path should be. -const DefaultPathEnv = "" - // CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. // This is used, for example, when validating a user provided path in docker cp. // If a drive letter is supplied, it must be the system drive. The drive letter diff --git a/vendor/github.com/moby/buildkit/util/throttle/throttle.go b/vendor/github.com/moby/buildkit/util/throttle/throttle.go index 490ccd9c3dcd6fb2e1a015da349b79c2011788e4..dfc4aefa90dc8f8ddacfce92e07fd7b13ebb7b3b 100644 --- a/vendor/github.com/moby/buildkit/util/throttle/throttle.go +++ b/vendor/github.com/moby/buildkit/util/throttle/throttle.go @@ -11,10 +11,10 @@ func Throttle(d time.Duration, f func()) func() { return throttle(d, f, true) } -// ThrottleAfter wraps a function so that internal function does not get called +// After wraps a function so that internal function does not get called // more frequently than the specified duration. The delay is added after function // has been called. 
-func ThrottleAfter(d time.Duration, f func()) func() { +func After(d time.Duration, f func()) func() { return throttle(d, f, false) } diff --git a/vendor/github.com/moby/buildkit/util/winlayers/applier.go b/vendor/github.com/moby/buildkit/util/winlayers/applier.go new file mode 100644 index 0000000000000000000000000000000000000000..91e415232a3aae85b66c27f349215a7fb59865b2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/winlayers/applier.go @@ -0,0 +1,187 @@ +package winlayers + +import ( + "archive/tar" + "context" + "io" + "io/ioutil" + "runtime" + "strings" + "sync" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func NewFileSystemApplierWithWindows(cs content.Provider, a diff.Applier) diff.Applier { + if runtime.GOOS == "windows" { + return a + } + + return &winApplier{ + cs: cs, + a: a, + } +} + +type winApplier struct { + cs content.Provider + a diff.Applier +} + +func (s *winApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) { + if !hasWindowsLayerMode(ctx) { + return s.a.Apply(ctx, desc, mounts, opts...) + } + + compressed, err := images.DiffCompression(ctx, desc.MediaType) + if err != nil { + return ocispec.Descriptor{}, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType) + } + + var ocidesc ocispec.Descriptor + if err := mount.WithTempMount(ctx, mounts, func(root string) error { + ra, err := s.cs.ReaderAt(ctx, desc) + if err != nil { + return errors.Wrap(err, "failed to get reader from content store") + } + defer ra.Close() + + r := content.NewReader(ra) + if compressed != "" { + ds, err := compression.DecompressStream(r) + if err != nil { + return err + } + defer ds.Close() + r = ds + } + + digester := digest.Canonical.Digester() + rc := &readCounter{ + r: io.TeeReader(r, digester.Hash()), + } + + rc2, discard := filter(rc, func(hdr *tar.Header) bool { + if strings.HasPrefix(hdr.Name, "Files/") { + hdr.Name = strings.TrimPrefix(hdr.Name, "Files/") + hdr.Linkname = strings.TrimPrefix(hdr.Linkname, "Files/") + // TODO: could convert the windows PAX headers to xattr here to reuse + // the original ones in diff for parent directories and file modifications + return true + } + return false + }) + + if _, err := archive.Apply(ctx, root, rc2); err != nil { + discard(err) + return err + } + + // Read any trailing data + if _, err := io.Copy(ioutil.Discard, rc); err != nil { + discard(err) + return err + } + + ocidesc = ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Size: rc.c, + Digest: digester.Digest(), + } + return nil + + }); err != nil { + return ocispec.Descriptor{}, err + } + return ocidesc, nil +} + +type readCounter struct { + r io.Reader + c int64 +} + +func (rc *readCounter) Read(p []byte) (n int, err error) { + n, err = rc.r.Read(p) + rc.c += int64(n) + return +} + +func filter(in io.Reader, f func(*tar.Header) bool) (io.Reader, func(error)) { + pr, pw := io.Pipe() + + rc := &readCanceler{Reader: in} + + go func() { + tarReader := tar.NewReader(rc) + tarWriter := tar.NewWriter(pw) + + pw.CloseWithError(func() error { 
+ for { + h, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if f(h) { + if err := tarWriter.WriteHeader(h); err != nil { + return err + } + if h.Size > 0 { + if _, err := io.Copy(tarWriter, tarReader); err != nil { + return err + } + } + } else { + if h.Size > 0 { + if _, err := io.Copy(ioutil.Discard, tarReader); err != nil { + return err + } + } + } + } + return tarWriter.Close() + }()) + }() + + discard := func(err error) { + rc.cancel(err) + pw.CloseWithError(err) + } + + return pr, discard +} + +type readCanceler struct { + mu sync.Mutex + io.Reader + err error +} + +func (r *readCanceler) Read(b []byte) (int, error) { + r.mu.Lock() + if r.err != nil { + r.mu.Unlock() + return 0, r.err + } + n, err := r.Reader.Read(b) + r.mu.Unlock() + return n, err +} + +func (r *readCanceler) cancel(err error) { + r.mu.Lock() + r.err = err + r.mu.Unlock() +} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/context.go b/vendor/github.com/moby/buildkit/util/winlayers/context.go new file mode 100644 index 0000000000000000000000000000000000000000..c0bd3f8a2f066c67168642e0113997943c27ca33 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/winlayers/context.go @@ -0,0 +1,19 @@ +package winlayers + +import "context" + +type contextKeyT string + +var contextKey = contextKeyT("buildkit/winlayers-on") + +func UseWindowsLayerMode(ctx context.Context) context.Context { + return context.WithValue(ctx, contextKey, true) +} + +func hasWindowsLayerMode(ctx context.Context) bool { + v := ctx.Value(contextKey) + if v == nil { + return false + } + return true +} diff --git a/vendor/github.com/moby/buildkit/util/winlayers/differ.go b/vendor/github.com/moby/buildkit/util/winlayers/differ.go new file mode 100644 index 0000000000000000000000000000000000000000..cdbc335d49d926c7458c99408d659eff3c25c7dd --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/winlayers/differ.go @@ -0,0 +1,274 @@ +package winlayers + +import ( + "archive/tar" + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "io" + "time" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + keyFileAttr = "MSWINDOWS.fileattr" + keySDRaw = "MSWINDOWS.rawsd" + keyCreationTime = "LIBARCHIVE.creationtime" +) + +func NewWalkingDiffWithWindows(store content.Store, d diff.Comparer) diff.Comparer { + return &winDiffer{ + store: store, + d: d, + } +} + +var emptyDesc = ocispec.Descriptor{} + +type winDiffer struct { + store content.Store + d diff.Comparer +} + +// Compare creates a diff between the given mounts and uploads the result +// to the content store. +func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { + if !hasWindowsLayerMode(ctx) { + return s.d.Compare(ctx, lower, upper, opts...) 
+ } + + var config diff.Config + for _, opt := range opts { + if err := opt(&config); err != nil { + return emptyDesc, err + } + } + + if config.MediaType == "" { + config.MediaType = ocispec.MediaTypeImageLayerGzip + } + + var isCompressed bool + switch config.MediaType { + case ocispec.MediaTypeImageLayer: + case ocispec.MediaTypeImageLayerGzip: + isCompressed = true + default: + return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", config.MediaType) + } + + var ocidesc ocispec.Descriptor + if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error { + return mount.WithTempMount(ctx, upper, func(upperRoot string) error { + var newReference bool + if config.Reference == "" { + newReference = true + config.Reference = uniqueRef() + } + + cw, err := s.store.Writer(ctx, + content.WithRef(config.Reference), + content.WithDescriptor(ocispec.Descriptor{ + MediaType: config.MediaType, // most contentstore implementations just ignore this + })) + if err != nil { + return errors.Wrap(err, "failed to open writer") + } + defer func() { + if err != nil { + cw.Close() + if newReference { + if err := s.store.Abort(ctx, config.Reference); err != nil { + log.G(ctx).WithField("ref", config.Reference).Warnf("failed to delete diff upload") + } + } + } + }() + if !newReference { + if err := cw.Truncate(0); err != nil { + return err + } + } + + if isCompressed { + dgstr := digest.SHA256.Digester() + compressed, err := compression.CompressStream(cw, compression.Gzip) + if err != nil { + return errors.Wrap(err, "failed to get compressed stream") + } + var w io.Writer = io.MultiWriter(compressed, dgstr.Hash()) + w, discard, done := makeWindowsLayer(w) + err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot) + if err != nil { + discard(err) + } + <-done + compressed.Close() + if err != nil { + return errors.Wrap(err, "failed to write compressed diff") + } + + if config.Labels == nil { + config.Labels = map[string]string{} + } + config.Labels["containerd.io/uncompressed"] = dgstr.Digest().String() + } else { + w, discard, done := makeWindowsLayer(cw) + if err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot); err != nil { + discard(err) + return errors.Wrap(err, "failed to write diff") + } + <-done + } + + var commitopts []content.Opt + if config.Labels != nil { + commitopts = append(commitopts, content.WithLabels(config.Labels)) + } + + dgst := cw.Digest() + if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil { + return errors.Wrap(err, "failed to commit") + } + + info, err := s.store.Info(ctx, dgst) + if err != nil { + return errors.Wrap(err, "failed to get info from content store") + } + + ocidesc = ocispec.Descriptor{ + MediaType: config.MediaType, + Size: info.Size, + Digest: info.Digest, + } + return nil + }) + }); err != nil { + return emptyDesc, err + } + + return ocidesc, nil +} + +func uniqueRef() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:])) +} + +func prepareWinHeader(h *tar.Header) { + if h.PAXRecords == nil { + h.PAXRecords = map[string]string{} + } + if h.Typeflag == tar.TypeDir { + h.Mode |= 1 << 14 + h.PAXRecords[keyFileAttr] = "16" + } + + if h.Typeflag == tar.TypeReg { + h.Mode |= 1 << 15 + h.PAXRecords[keyFileAttr] = "32" + } + + if !h.ModTime.IsZero() { + h.PAXRecords[keyCreationTime] = fmt.Sprintf("%d.%d", h.ModTime.Unix(), h.ModTime.Nanosecond()) + } + + h.Format = tar.FormatPAX +} + 
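// Editor's note, a minimal sketch that is not part of this patch: one hypothetical way a
// caller could wire the winlayers wrappers above into a containerd-style differ/applier
// pair. The package, function names, and parameters below are illustrative assumptions,
// not code from this change.
package winlayersexample

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/mount"
	"github.com/moby/buildkit/util/winlayers"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// wrapForWindows wraps a default comparer/applier pair; on Windows hosts the
// constructors simply return the wrapped values unchanged.
func wrapForWindows(cs content.Store, c diff.Comparer, a diff.Applier) (diff.Comparer, diff.Applier) {
	return winlayers.NewWalkingDiffWithWindows(cs, c), winlayers.NewFileSystemApplierWithWindows(cs, a)
}

// compareAsWindowsLayer opts a single Compare call into Windows layer mode, so the
// produced tar stream gets the Files/ and Hives/ roots plus PAX security descriptors.
func compareAsWindowsLayer(ctx context.Context, c diff.Comparer, lower, upper []mount.Mount) (ocispec.Descriptor, error) {
	return c.Compare(winlayers.UseWindowsLayerMode(ctx), lower, upper)
}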
+func addSecurityDescriptor(h *tar.Header) { + if h.Typeflag == tar.TypeDir { + // O:BAG:SYD:(A;OICI;FA;;;BA)(A;OICI;FA;;;SY)(A;;FA;;;BA)(A;OICIIO;GA;;;CO)(A;OICI;0x1200a9;;;BU)(A;CI;LC;;;BU)(A;CI;DC;;;BU) + h.PAXRecords[keySDRaw] = "AQAEgBQAAAAkAAAAAAAAADAAAAABAgAAAAAABSAAAAAgAgAAAQEAAAAAAAUSAAAAAgCoAAcAAAAAAxgA/wEfAAECAAAAAAAFIAAAACACAAAAAxQA/wEfAAEBAAAAAAAFEgAAAAAAGAD/AR8AAQIAAAAAAAUgAAAAIAIAAAALFAAAAAAQAQEAAAAAAAMAAAAAAAMYAKkAEgABAgAAAAAABSAAAAAhAgAAAAIYAAQAAAABAgAAAAAABSAAAAAhAgAAAAIYAAIAAAABAgAAAAAABSAAAAAhAgAA" + } + + if h.Typeflag == tar.TypeReg { + // O:BAG:SYD:(A;;FA;;;BA)(A;;FA;;;SY)(A;;0x1200a9;;;BU) + h.PAXRecords[keySDRaw] = "AQAEgBQAAAAkAAAAAAAAADAAAAABAgAAAAAABSAAAAAgAgAAAQEAAAAAAAUSAAAAAgBMAAMAAAAAABgA/wEfAAECAAAAAAAFIAAAACACAAAAABQA/wEfAAEBAAAAAAAFEgAAAAAAGACpABIAAQIAAAAAAAUgAAAAIQIAAA==" + } +} + +func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { + pr, pw := io.Pipe() + done := make(chan error) + + go func() { + tarReader := tar.NewReader(pr) + tarWriter := tar.NewWriter(w) + + err := func() error { + + h := &tar.Header{ + Name: "Hives", + Typeflag: tar.TypeDir, + ModTime: time.Now(), + } + prepareWinHeader(h) + if err := tarWriter.WriteHeader(h); err != nil { + return err + } + + h = &tar.Header{ + Name: "Files", + Typeflag: tar.TypeDir, + ModTime: time.Now(), + } + prepareWinHeader(h) + if err := tarWriter.WriteHeader(h); err != nil { + return err + } + + for { + h, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + h.Name = "Files/" + h.Name + if h.Linkname != "" { + h.Linkname = "Files/" + h.Linkname + } + prepareWinHeader(h) + addSecurityDescriptor(h) + if err := tarWriter.WriteHeader(h); err != nil { + return err + } + if h.Size > 0 { + if _, err := io.Copy(tarWriter, tarReader); err != nil { + return err + } + } + } + return tarWriter.Close() + }() + if err != nil { + logrus.Errorf("makeWindowsLayer %+v", err) + } + pw.CloseWithError(err) + done <- err + return + }() + + discard := func(err error) { + pw.CloseWithError(err) + } + + return pw, discard, done +} diff --git a/vendor/github.com/moby/buildkit/worker/cacheresult.go b/vendor/github.com/moby/buildkit/worker/cacheresult.go index d0ee1a2a6a61a64ca344fa57334118f7a2836fea..0e684e7d57b46bc9fc77c681054a9aa021c88573 100644 --- a/vendor/github.com/moby/buildkit/worker/cacheresult.go +++ b/vendor/github.com/moby/buildkit/worker/cacheresult.go @@ -6,7 +6,9 @@ import ( "time" "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" "github.com/pkg/errors" ) @@ -36,7 +38,7 @@ func (s *cacheResultStorage) Save(res solver.Result, createdAt time.Time) (solve return solver.CacheResult{ID: ref.ID(), CreatedAt: createdAt}, nil } func (s *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) { - return s.load(res.ID, false) + return s.load(ctx, res.ID, false) } func (s *cacheResultStorage) getWorkerRef(id string) (Worker, string, error) { @@ -51,7 +53,7 @@ func (s *cacheResultStorage) getWorkerRef(id string) (Worker, string, error) { return w, refID, nil } -func (s *cacheResultStorage) load(id string, hidden bool) (solver.Result, error) { +func (s *cacheResultStorage) load(ctx context.Context, id string, hidden bool) (solver.Result, error) { w, refID, err := s.getWorkerRef(id) if err != nil { return nil, err @@ -59,31 +61,32 @@ func (s *cacheResultStorage) load(id string, hidden bool) (solver.Result, error) if refID == "" { 
return NewWorkerRefResult(nil, w), nil } - ref, err := w.LoadRef(refID, hidden) + ref, err := w.LoadRef(ctx, refID, hidden) if err != nil { return nil, err } return NewWorkerRefResult(ref, w), nil } -func (s *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) { +func (s *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult, g session.Group) (*solver.Remote, error) { w, refID, err := s.getWorkerRef(res.ID) if err != nil { return nil, err } - ref, err := w.LoadRef(refID, true) + ref, err := w.LoadRef(ctx, refID, true) if err != nil { return nil, err } defer ref.Release(context.TODO()) - remote, err := w.GetRemote(ctx, ref, false) + wref := WorkerRef{ref, w} + remote, err := wref.GetRemote(ctx, false, compression.Default, g) if err != nil { return nil, nil // ignore error. loadRemote is best effort } return remote, nil } func (s *cacheResultStorage) Exists(id string) bool { - ref, err := s.load(id, true) + ref, err := s.load(context.TODO(), id, true) if err != nil { return false } diff --git a/vendor/github.com/moby/buildkit/worker/result.go b/vendor/github.com/moby/buildkit/worker/result.go index 9aa6af4167489c45c62cc9d95bbc343dbc3e870d..e178ef3fffe152f37f701fdca7a35b9cbbe50c97 100644 --- a/vendor/github.com/moby/buildkit/worker/result.go +++ b/vendor/github.com/moby/buildkit/worker/result.go @@ -4,7 +4,9 @@ import ( "context" "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" ) func NewWorkerRefResult(ref cache.ImmutableRef, worker Worker) solver.Result { @@ -24,6 +26,18 @@ func (wr *WorkerRef) ID() string { return wr.Worker.ID() + "::" + refID } +// GetRemote method abstracts ImmutableRef's GetRemote to allow a Worker to override. +// This is needed for moby integration. +// Use this method instead of calling ImmutableRef.GetRemote() directly. +func (wr *WorkerRef) GetRemote(ctx context.Context, createIfNeeded bool, compressionType compression.Type, g session.Group) (*solver.Remote, error) { + if w, ok := wr.Worker.(interface { + GetRemote(context.Context, cache.ImmutableRef, bool, compression.Type, session.Group) (*solver.Remote, error) + }); ok { + return w.GetRemote(ctx, wr.ImmutableRef, createIfNeeded, compressionType, g) + } + return wr.ImmutableRef.GetRemote(ctx, createIfNeeded, compressionType, g) +} + type workerRefResult struct { *WorkerRef } diff --git a/vendor/github.com/moby/buildkit/worker/worker.go b/vendor/github.com/moby/buildkit/worker/worker.go index bc095bc5e9e9813c40af79a21e1c0609676e4b2e..a34e598de65c8415c7a8cd99bfb2ae5db2289a2c 100644 --- a/vendor/github.com/moby/buildkit/worker/worker.go +++ b/vendor/github.com/moby/buildkit/worker/worker.go @@ -5,6 +5,7 @@ import ( "github.com/containerd/containerd/content" "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/executor" @@ -23,19 +24,19 @@ type Worker interface { Platforms(noCache bool) []specs.Platform GCPolicy() []client.PruneInfo - LoadRef(id string, hidden bool) (cache.ImmutableRef, error) + LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) // ResolveOp resolves Vertex.Sys() to Op implementation. 
ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) Prune(ctx context.Context, ch chan client.UsageInfo, opt ...client.PruneInfo) error - GetRemote(ctx context.Context, ref cache.ImmutableRef, createIfNeeded bool) (*solver.Remote, error) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) PruneCacheMounts(ctx context.Context, ids []string) error ContentStore() content.Store Executor() executor.Executor CacheManager() cache.Manager + MetadataStore() *metadata.Store } // Pre-defined label keys diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE b/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE index c259d1290717de28c3cb22a4cab94668d20e819c..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 100644 --- a/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE +++ b/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE @@ -1,27 +1,201 @@ -Copyright (c) 2016, opentracing-contrib -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of go-stdlib nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/README.md b/vendor/github.com/opentracing-contrib/go-stdlib/README.md index 139709c1461a5204f1755e140b8072be9ee6d5d1..c820daad1456cfffad33b64651a43c18f7da78d4 100644 --- a/vendor/github.com/opentracing-contrib/go-stdlib/README.md +++ b/vendor/github.com/opentracing-contrib/go-stdlib/README.md @@ -16,3 +16,7 @@ following caveats: - **net/http**: Client and server instrumentation. *Only supported with Go 1.7 and later.* + +## License + +By contributing to this repository, you agree that your contributions will be licensed under [Apache 2.0 License](./LICENSE). diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/go.mod b/vendor/github.com/opentracing-contrib/go-stdlib/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..3b3c5c2230b1ffedb15527abb4c4199111f307fe --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-stdlib/go.mod @@ -0,0 +1,7 @@ +module github.com/opentracing-contrib/go-stdlib + +go 1.14 + +require ( + github.com/opentracing/opentracing-go v1.1.0 +) diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go index 8d33bcb63c0fe689802642f3d7728aba7cc76286..bfb305ffa463a38c22560c87e52ae714fcacae5f 100644 --- a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go +++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go @@ -7,6 +7,7 @@ import ( "io" "net/http" "net/http/httptrace" + "net/url" "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" @@ -31,9 +32,12 @@ type Transport struct { } type clientOptions struct { - operationName string - componentName string - disableClientTrace bool + operationName string + componentName string + urlTagFunc func(u *url.URL) string + disableClientTrace bool + disableInjectSpanContext bool + spanObserver func(span opentracing.Span, r *http.Request) } // ClientOption contols the behavior of TraceRequest. @@ -47,6 +51,15 @@ func OperationName(operationName string) ClientOption { } } +// URLTagFunc returns a ClientOption that uses given function f +// to set the span's http.url tag. Can be used to change the default +// http.url tag, eg to redact sensitive information. +func URLTagFunc(f func(u *url.URL) string) ClientOption { + return func(options *clientOptions) { + options.urlTagFunc = f + } +} + // ComponentName returns a ClientOption that sets the component // name for the client-side span. func ComponentName(componentName string) ClientOption { @@ -63,6 +76,24 @@ func ClientTrace(enabled bool) ClientOption { } } +// InjectSpanContext returns a ClientOption that turns on or off +// injection of the Span context in the request HTTP headers. +// If this option is not used, the default behaviour is to +// inject the span context. +func InjectSpanContext(enabled bool) ClientOption { + return func(options *clientOptions) { + options.disableInjectSpanContext = !enabled + } +} + +// ClientSpanObserver returns a ClientOption that observes the span +// for the client-side span. +func ClientSpanObserver(f func(span opentracing.Span, r *http.Request)) ClientOption { + return func(options *clientOptions) { + options.spanObserver = f + } +} + // TraceRequest adds a ClientTracer to req, tracing the request and // all requests caused due to redirects. When tracing requests this // way you must also use Transport. 
@@ -88,7 +119,12 @@ func ClientTrace(enabled bool) ClientOption { // return nil // } func TraceRequest(tr opentracing.Tracer, req *http.Request, options ...ClientOption) (*http.Request, *Tracer) { - opts := &clientOptions{} + opts := &clientOptions{ + urlTagFunc: func(u *url.URL) string { + return u.String() + }, + spanObserver: func(_ opentracing.Span, _ *http.Request) {}, + } for _, opt := range options { opt(opts) } @@ -113,24 +149,38 @@ func (c closeTracker) Close() error { return err } +// TracerFromRequest retrieves the Tracer from the request. If the request does +// not have a Tracer it will return nil. +func TracerFromRequest(req *http.Request) *Tracer { + tr, ok := req.Context().Value(keyTracer).(*Tracer) + if !ok { + return nil + } + return tr +} + // RoundTrip implements the RoundTripper interface. func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.RoundTripper if rt == nil { rt = http.DefaultTransport } - tracer, ok := req.Context().Value(keyTracer).(*Tracer) - if !ok { + tracer := TracerFromRequest(req) + if tracer == nil { return rt.RoundTrip(req) } tracer.start(req) ext.HTTPMethod.Set(tracer.sp, req.Method) - ext.HTTPUrl.Set(tracer.sp, req.URL.String()) + ext.HTTPUrl.Set(tracer.sp, tracer.opts.urlTagFunc(req.URL)) + tracer.opts.spanObserver(tracer.sp, req) + + if !tracer.opts.disableInjectSpanContext { + carrier := opentracing.HTTPHeadersCarrier(req.Header) + tracer.sp.Tracer().Inject(tracer.sp.Context(), opentracing.HTTPHeaders, carrier) + } - carrier := opentracing.HTTPHeadersCarrier(req.Header) - tracer.sp.Tracer().Inject(tracer.sp.Context(), opentracing.HTTPHeaders, carrier) resp, err := rt.RoundTrip(req) if err != nil { @@ -138,6 +188,9 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { return resp, err } ext.HTTPStatusCode.Set(tracer.sp, uint16(resp.StatusCode)) + if resp.StatusCode >= http.StatusInternalServerError { + ext.Error.Set(tracer.sp, true) + } if req.Method == "HEAD" { tracer.sp.Finish() } else { diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go index 2b31415e7e871c65bfc16d9ce92a48e5b917e6f8..db2df6620412b23c855ee249b5198b05f7be157d 100644 --- a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go +++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go @@ -4,23 +4,17 @@ package nethttp import ( "net/http" + "net/url" opentracing "github.com/opentracing/opentracing-go" "github.com/opentracing/opentracing-go/ext" ) -type statusCodeTracker struct { - http.ResponseWriter - status int -} - -func (w *statusCodeTracker) WriteHeader(status int) { - w.status = status - w.ResponseWriter.WriteHeader(status) -} - type mwOptions struct { opNameFunc func(r *http.Request) string + spanFilter func(r *http.Request) bool + spanObserver func(span opentracing.Span, r *http.Request) + urlTagFunc func(u *url.URL) string componentName string } @@ -36,13 +30,39 @@ func OperationNameFunc(f func(r *http.Request) string) MWOption { } // MWComponentName returns a MWOption that sets the component name -// name for the server-side span. +// for the server-side span. func MWComponentName(componentName string) MWOption { return func(options *mwOptions) { options.componentName = componentName } } +// MWSpanFilter returns a MWOption that filters requests from creating a span +// for the server-side span. +// Span won't be created if it returns false. 
+func MWSpanFilter(f func(r *http.Request) bool) MWOption { + return func(options *mwOptions) { + options.spanFilter = f + } +} + +// MWSpanObserver returns a MWOption that observe the span +// for the server-side span. +func MWSpanObserver(f func(span opentracing.Span, r *http.Request)) MWOption { + return func(options *mwOptions) { + options.spanObserver = f + } +} + +// MWURLTagFunc returns a MWOption that uses given function f +// to set the span's http.url tag. Can be used to change the default +// http.url tag, eg to redact sensitive information. +func MWURLTagFunc(f func(u *url.URL) string) MWOption { + return func(options *mwOptions) { + options.urlTagFunc = f + } +} + // Middleware wraps an http.Handler and traces incoming requests. // Additionally, it adds the span to the request's context. // @@ -58,39 +78,80 @@ func MWComponentName(componentName string) MWOption { // mw := nethttp.Middleware( // tracer, // http.DefaultServeMux, -// nethttp.OperationName(func(r *http.Request) string { +// nethttp.OperationNameFunc(func(r *http.Request) string { // return "HTTP " + r.Method + ":/api/customers" // }), +// nethttp.MWSpanObserver(func(sp opentracing.Span, r *http.Request) { +// sp.SetTag("http.uri", r.URL.EscapedPath()) +// }), // ) func Middleware(tr opentracing.Tracer, h http.Handler, options ...MWOption) http.Handler { + return MiddlewareFunc(tr, h.ServeHTTP, options...) +} + +// MiddlewareFunc wraps an http.HandlerFunc and traces incoming requests. +// It behaves identically to the Middleware function above. +// +// Example: +// http.ListenAndServe("localhost:80", nethttp.MiddlewareFunc(tracer, MyHandler)) +func MiddlewareFunc(tr opentracing.Tracer, h http.HandlerFunc, options ...MWOption) http.HandlerFunc { opts := mwOptions{ opNameFunc: func(r *http.Request) string { return "HTTP " + r.Method }, + spanFilter: func(r *http.Request) bool { return true }, + spanObserver: func(span opentracing.Span, r *http.Request) {}, + urlTagFunc: func(u *url.URL) string { + return u.String() + }, } for _, opt := range options { opt(&opts) } + // set component name, use "net/http" if caller does not specify + componentName := opts.componentName + if componentName == "" { + componentName = defaultComponentName + } + fn := func(w http.ResponseWriter, r *http.Request) { + if !opts.spanFilter(r) { + h(w, r) + return + } ctx, _ := tr.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(r.Header)) sp := tr.StartSpan(opts.opNameFunc(r), ext.RPCServerOption(ctx)) ext.HTTPMethod.Set(sp, r.Method) - ext.HTTPUrl.Set(sp, r.URL.String()) - - // set component name, use "net/http" if caller does not specify - componentName := opts.componentName - if componentName == "" { - componentName = defaultComponentName - } + ext.HTTPUrl.Set(sp, opts.urlTagFunc(r.URL)) ext.Component.Set(sp, componentName) + opts.spanObserver(sp, r) - w = &statusCodeTracker{w, 200} + sct := &statusCodeTracker{ResponseWriter: w} r = r.WithContext(opentracing.ContextWithSpan(r.Context(), sp)) - h.ServeHTTP(w, r) + defer func() { + panicErr := recover() + didPanic := panicErr != nil + + if sct.status == 0 && !didPanic { + // Standard behavior of http.Server is to assume status code 200 if one was not written by a handler that returned successfully. 
+ // https://github.com/golang/go/blob/fca286bed3ed0e12336532cc711875ae5b3cb02a/src/net/http/server.go#L120 + sct.status = 200 + } + if sct.status > 0 { + ext.HTTPStatusCode.Set(sp, uint16(sct.status)) + } + if sct.status >= http.StatusInternalServerError || didPanic { + ext.Error.Set(sp, true) + } + sp.Finish() + + if didPanic { + panic(panicErr) + } + }() - ext.HTTPStatusCode.Set(sp, uint16(w.(*statusCodeTracker).status)) - sp.Finish() + h(sct.wrappedResponseWriter(), r) } return http.HandlerFunc(fn) } diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker.go new file mode 100644 index 0000000000000000000000000000000000000000..80a5ce08645b3a22f30b6951db1c2f1ebfd466a0 --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker.go @@ -0,0 +1,251 @@ +// +build go1.8 + +package nethttp + +import ( + "io" + "net/http" +) + +type statusCodeTracker struct { + http.ResponseWriter + status int +} + +func (w *statusCodeTracker) WriteHeader(status int) { + w.status = status + w.ResponseWriter.WriteHeader(status) +} + +func (w *statusCodeTracker) Write(b []byte) (int, error) { + return w.ResponseWriter.Write(b) +} + +// wrappedResponseWriter returns a wrapped version of the original +// ResponseWriter and only implements the same combination of additional +// interfaces as the original. This implementation is based on +// https://github.com/felixge/httpsnoop. +func (w *statusCodeTracker) wrappedResponseWriter() http.ResponseWriter { + var ( + hj, i0 = w.ResponseWriter.(http.Hijacker) + cn, i1 = w.ResponseWriter.(http.CloseNotifier) + pu, i2 = w.ResponseWriter.(http.Pusher) + fl, i3 = w.ResponseWriter.(http.Flusher) + rf, i4 = w.ResponseWriter.(io.ReaderFrom) + ) + + switch { + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + }{w} + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + io.ReaderFrom + }{w, rf} + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Flusher + }{w, fl} + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{w, fl, rf} + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + }{w, pu} + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + io.ReaderFrom + }{w, pu, rf} + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + }{w, pu, fl} + case !i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + io.ReaderFrom + }{w, pu, fl, rf} + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + }{w, cn} + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{w, cn, rf} + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + }{w, cn, fl} + case !i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{w, cn, fl, rf} + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{w, cn, pu} + case !i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{w, cn, pu, rf} + 
case !i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + }{w, cn, pu, fl} + case !i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{w, cn, pu, fl, rf} + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + }{w, hj} + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{w, hj, rf} + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + }{w, hj, fl} + case i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + io.ReaderFrom + }{w, hj, fl, rf} + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + }{w, hj, pu} + case i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + io.ReaderFrom + }{w, hj, pu, rf} + case i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + }{w, hj, pu, fl} + case i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + io.ReaderFrom + }{w, hj, pu, fl, rf} + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + }{w, hj, cn} + case i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{w, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{w, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{w, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{w, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{w, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{w, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{w, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{w} + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE index 148509a4035a9136c3e208f6b7a4206db1b6e80f..f0027349e8302d3631136c94fbead02cdaa27386 100644 --- a/vendor/github.com/opentracing/opentracing-go/LICENSE +++ b/vendor/github.com/opentracing/opentracing-go/LICENSE @@ -1,21 +1,201 @@ -The MIT License (MIT) - -Copyright (c) 2016 The OpenTracing Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following 
conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The OpenTracing Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md index 19a541c2c593d5e40f6af863e4ab9638cb6bf223..6ef1d7c9d27492b6b70e8563832e968663428d5e 100644 --- a/vendor/github.com/opentracing/opentracing-go/README.md +++ b/vendor/github.com/opentracing/opentracing-go/README.md @@ -8,8 +8,8 @@ This package is a Go platform API for OpenTracing. ## Required Reading In order to understand the Go platform API, one must first be familiar with the -[OpenTracing project](http://opentracing.io) and -[terminology](http://opentracing.io/documentation/pages/spec.html) more specifically. +[OpenTracing project](https://opentracing.io) and +[terminology](https://opentracing.io/specification/) more specifically. ## API overview for those adding instrumentation @@ -27,7 +27,7 @@ The simplest starting point is `./default_tracer.go`. As early as possible, call import ".../some_tracing_impl" func main() { - opentracing.InitGlobalTracer( + opentracing.SetGlobalTracer( // tracing impl specific: some_tracing_impl.New(...), ) @@ -35,7 +35,7 @@ The simplest starting point is `./default_tracer.go`. As early as possible, call } ``` -##### Non-Singleton initialization +#### Non-Singleton initialization If you prefer direct control to singletons, manage ownership of the `opentracing.Tracer` implementation explicitly. @@ -161,3 +161,11 @@ Tracing system implementors may be able to reuse or copy-paste-modify the `basic ## API compatibility For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. + +## Tracer test suite + +A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. + +## Licensing + +[Apache 2.0 License](./LICENSE). 
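The README and globaltracer changes in this vendor bump rename the recommended entry point to `SetGlobalTracer` and (further below) export the tag-name types (`StringTagName`, `Uint16TagName`, `BoolTagName`, ...) that the nethttp middleware relies on. As a quick orientation only, here is a minimal sketch of how those pieces fit together; it is not part of the diff, uses only the opentracing-go API visible in this bump, and substitutes `NoopTracer` for a real tracer implementation to stay self-contained:

```go
package main

import (
	"context"
	"net/http"

	"github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

func handle(ctx context.Context, req *http.Request) {
	// StartSpanFromContext uses the global tracer and chains to any parent
	// span already stored in ctx.
	span, ctx := opentracing.StartSpanFromContext(ctx, "handle-request")
	defer span.Finish()

	// The tag helpers are now exported types, but their Set methods are
	// used exactly as before.
	ext.HTTPMethod.Set(span, req.Method)
	ext.HTTPUrl.Set(span, req.URL.String())
	ext.HTTPStatusCode.Set(span, uint16(http.StatusOK))
	ext.Error.Set(span, false)

	_ = ctx // pass ctx on so downstream work becomes child spans
}

func main() {
	// SetGlobalTracer replaces the deprecated InitGlobalTracer; before it is
	// called, IsGlobalTracerRegistered reports false and spans are no-ops.
	// A real service would register a concrete tracer here.
	if !opentracing.IsGlobalTracerRegistered() {
		opentracing.SetGlobalTracer(opentracing.NoopTracer{})
	}

	req, _ := http.NewRequest(http.MethodGet, "http://example.com/", nil)
	handle(context.Background(), req)
}
```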
diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go new file mode 100644 index 0000000000000000000000000000000000000000..e11977ebe85dbd8c0c01acbede764a76853f439a --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext.go @@ -0,0 +1,24 @@ +package opentracing + +import ( + "context" +) + +// TracerContextWithSpanExtension is an extension interface that the +// implementation of the Tracer interface may want to implement. It +// allows to have some control over the go context when the +// ContextWithSpan is invoked. +// +// The primary purpose of this extension are adapters from opentracing +// API to some other tracing API. +type TracerContextWithSpanExtension interface { + // ContextWithSpanHook gets called by the ContextWithSpan + // function, when the Tracer implementation also implements + // this interface. It allows to put extra information into the + // context and make it available to the callers of the + // ContextWithSpan. + // + // This hook is invoked before the ContextWithSpan function + // actually puts the span into the context. + ContextWithSpanHook(ctx context.Context, span Span) context.Context +} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go new file mode 100644 index 0000000000000000000000000000000000000000..8282bd758467690aba93c9fb8d794e0ce403ac5f --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/field.go @@ -0,0 +1,17 @@ +package ext + +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/log" +) + +// LogError sets the error=true tag on the Span and logs err as an "error" event. +func LogError(span opentracing.Span, err error, fields ...log.Field) { + Error.Set(span, true) + ef := []log.Field{ + log.Event("error"), + log.Error(err), + } + ef = append(ef, fields...) + span.LogFields(ef...) +} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go index 8800129a237de963045e8780e60f0f4864be3bbd..a414b5951f03fc5761cd37f559ef255af698c979 100644 --- a/vendor/github.com/opentracing/opentracing-go/ext/tags.go +++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go @@ -1,6 +1,6 @@ package ext -import opentracing "github.com/opentracing/opentracing-go" +import "github.com/opentracing/opentracing-go" // These constants define common tag names recommended for better portability across // tracing systems and languages/platforms. @@ -47,40 +47,40 @@ var ( // Component is a low-cardinality identifier of the module, library, // or package that is generating a span. - Component = stringTagName("component") + Component = StringTagName("component") ////////////////////////////////////////////////////////////////////// // Sampling hint ////////////////////////////////////////////////////////////////////// // SamplingPriority determines the priority of sampling this Span. - SamplingPriority = uint16TagName("sampling.priority") + SamplingPriority = Uint16TagName("sampling.priority") ////////////////////////////////////////////////////////////////////// - // Peer tags. These tags can be emitted by either client-side of + // Peer tags. These tags can be emitted by either client-side or // server-side to describe the other side/service in a peer-to-peer // communications, like an RPC call. 
////////////////////////////////////////////////////////////////////// // PeerService records the service name of the peer. - PeerService = stringTagName("peer.service") + PeerService = StringTagName("peer.service") // PeerAddress records the address name of the peer. This may be a "ip:port", // a bare "hostname", a FQDN or even a database DSN substring // like "mysql://username@127.0.0.1:3306/dbname" - PeerAddress = stringTagName("peer.address") + PeerAddress = StringTagName("peer.address") // PeerHostname records the host name of the peer - PeerHostname = stringTagName("peer.hostname") + PeerHostname = StringTagName("peer.hostname") // PeerHostIPv4 records IP v4 host address of the peer - PeerHostIPv4 = ipv4Tag("peer.ipv4") + PeerHostIPv4 = IPv4TagName("peer.ipv4") // PeerHostIPv6 records IP v6 host address of the peer - PeerHostIPv6 = stringTagName("peer.ipv6") + PeerHostIPv6 = StringTagName("peer.ipv6") // PeerPort records port number of the peer - PeerPort = uint16TagName("peer.port") + PeerPort = Uint16TagName("peer.port") ////////////////////////////////////////////////////////////////////// // HTTP Tags @@ -88,46 +88,46 @@ var ( // HTTPUrl should be the URL of the request being handled in this segment // of the trace, in standard URI format. The protocol is optional. - HTTPUrl = stringTagName("http.url") + HTTPUrl = StringTagName("http.url") // HTTPMethod is the HTTP method of the request, and is case-insensitive. - HTTPMethod = stringTagName("http.method") + HTTPMethod = StringTagName("http.method") // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the // HTTP response. - HTTPStatusCode = uint16TagName("http.status_code") + HTTPStatusCode = Uint16TagName("http.status_code") ////////////////////////////////////////////////////////////////////// // DB Tags ////////////////////////////////////////////////////////////////////// // DBInstance is database instance name. - DBInstance = stringTagName("db.instance") + DBInstance = StringTagName("db.instance") // DBStatement is a database statement for the given database type. // It can be a query or a prepared statement (i.e., before substitution). - DBStatement = stringTagName("db.statement") + DBStatement = StringTagName("db.statement") // DBType is a database type. For any SQL database, "sql". // For others, the lower-case database category, e.g. "redis" - DBType = stringTagName("db.type") + DBType = StringTagName("db.type") // DBUser is a username for accessing database. - DBUser = stringTagName("db.user") + DBUser = StringTagName("db.user") ////////////////////////////////////////////////////////////////////// // Message Bus Tag ////////////////////////////////////////////////////////////////////// // MessageBusDestination is an address at which messages can be exchanged - MessageBusDestination = stringTagName("message_bus.destination") + MessageBusDestination = StringTagName("message_bus.destination") ////////////////////////////////////////////////////////////////////// // Error Tag ////////////////////////////////////////////////////////////////////// // Error indicates that operation represented by the span resulted in an error. 
- Error = boolTagName("error") + Error = BoolTagName("error") ) // --- @@ -163,48 +163,53 @@ func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption // --- -type stringTagName string +// StringTagName is a common tag name to be set to a string value +type StringTagName string // Set adds a string tag to the `span` -func (tag stringTagName) Set(span opentracing.Span, value string) { +func (tag StringTagName) Set(span opentracing.Span, value string) { span.SetTag(string(tag), value) } // --- -type uint32TagName string +// Uint32TagName is a common tag name to be set to a uint32 value +type Uint32TagName string // Set adds a uint32 tag to the `span` -func (tag uint32TagName) Set(span opentracing.Span, value uint32) { +func (tag Uint32TagName) Set(span opentracing.Span, value uint32) { span.SetTag(string(tag), value) } // --- -type uint16TagName string +// Uint16TagName is a common tag name to be set to a uint16 value +type Uint16TagName string // Set adds a uint16 tag to the `span` -func (tag uint16TagName) Set(span opentracing.Span, value uint16) { +func (tag Uint16TagName) Set(span opentracing.Span, value uint16) { span.SetTag(string(tag), value) } // --- -type boolTagName string +// BoolTagName is a common tag name to be set to a bool value +type BoolTagName string -// Add adds a bool tag to the `span` -func (tag boolTagName) Set(span opentracing.Span, value bool) { +// Set adds a bool tag to the `span` +func (tag BoolTagName) Set(span opentracing.Span, value bool) { span.SetTag(string(tag), value) } -type ipv4Tag string +// IPv4TagName is a common tag name to be set to an ipv4 value +type IPv4TagName string // Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility -func (tag ipv4Tag) Set(span opentracing.Span, value uint32) { +func (tag IPv4TagName) Set(span opentracing.Span, value uint32) { span.SetTag(string(tag), value) } // SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1" -func (tag ipv4Tag) SetString(span opentracing.Span, value string) { +func (tag IPv4TagName) SetString(span opentracing.Span, value string) { span.SetTag(string(tag), value) } diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go index 8c8e793ff23c6a637652b85e259d984a1039c1a2..4f7066a925cd9e2e80e2430c0a2e1fb8361f6f9b 100644 --- a/vendor/github.com/opentracing/opentracing-go/globaltracer.go +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -1,7 +1,12 @@ package opentracing +type registeredTracer struct { + tracer Tracer + isRegistered bool +} + var ( - globalTracer Tracer = NoopTracer{} + globalTracer = registeredTracer{NoopTracer{}, false} ) // SetGlobalTracer sets the [singleton] opentracing.Tracer returned by @@ -11,22 +16,27 @@ var ( // Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` // (etc) globals are noops. func SetGlobalTracer(tracer Tracer) { - globalTracer = tracer + globalTracer = registeredTracer{tracer, true} } // GlobalTracer returns the global singleton `Tracer` implementation. // Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop // implementation that drops all data handed to it. func GlobalTracer() Tracer { - return globalTracer + return globalTracer.tracer } // StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. 
func StartSpan(operationName string, opts ...StartSpanOption) Span { - return globalTracer.StartSpan(operationName, opts...) + return globalTracer.tracer.StartSpan(operationName, opts...) } // InitGlobalTracer is deprecated. Please use SetGlobalTracer. func InitGlobalTracer(tracer Tracer) { SetGlobalTracer(tracer) } + +// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered +func IsGlobalTracerRegistered() bool { + return globalTracer.isRegistered +} diff --git a/vendor/github.com/opentracing/opentracing-go/go.mod b/vendor/github.com/opentracing/opentracing-go/go.mod new file mode 100644 index 0000000000000000000000000000000000000000..bf48bb5d73f748a117838b2f8853408895be191c --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/go.mod @@ -0,0 +1,5 @@ +module github.com/opentracing/opentracing-go + +go 1.14 + +require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go index 222a65202f49b4eabc3de6c893405e6bbede33b0..1831bc9b263716538d11349b849f0fc628cb21a2 100644 --- a/vendor/github.com/opentracing/opentracing-go/gocontext.go +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -1,14 +1,19 @@ package opentracing -import "golang.org/x/net/context" +import "context" type contextKey struct{} var activeSpanKey = contextKey{} // ContextWithSpan returns a new `context.Context` that holds a reference to -// `span`'s SpanContext. +// the span. If span is nil, a new context without an active span is returned. func ContextWithSpan(ctx context.Context, span Span) context.Context { + if span != nil { + if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok { + ctx = tracerWithHook.ContextWithSpanHook(ctx, span) + } + } return context.WithValue(ctx, activeSpanKey, span) } @@ -41,17 +46,20 @@ func SpanFromContext(ctx context.Context) Span { // ... // } func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { - return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) + return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) } -// startSpanFromContextWithTracer is factored out for testing purposes. -func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { - var span Span +// StartSpanFromContextWithTracer starts and returns a span with `operationName` +// using a span found within the context as a ChildOfRef. If that doesn't exist +// it creates a root span. It also returns a context.Context object built +// around the returned span. +// +// It's behavior is identical to StartSpanFromContext except that it takes an explicit +// tracer as opposed to using the global tracer. +func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { if parentSpan := SpanFromContext(ctx); parentSpan != nil { opts = append(opts, ChildOf(parentSpan.Context())) - span = tracer.StartSpan(operationName, opts...) - } else { - span = tracer.StartSpan(operationName, opts...) } + span := tracer.StartSpan(operationName, opts...) 
return span, ContextWithSpan(ctx, span) } diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go index 50feea341a732865402478bdd1f741bf3df8a4b6..f222ded797c17a32fd4f6f01e398cde09c796a37 100644 --- a/vendor/github.com/opentracing/opentracing-go/log/field.go +++ b/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -122,16 +122,19 @@ func Float64(key string, val float64) Field { } } -// Error adds an error with the key "error" to a Span.LogFields() record +// Error adds an error with the key "error.object" to a Span.LogFields() record func Error(err error) Field { return Field{ - key: "error", + key: "error.object", fieldType: errorType, interfaceVal: err, } } // Object adds an object-valued key:value pair to a Span.LogFields() record +// Please pass in an immutable object, otherwise there may be concurrency issues. +// Such as passing in the map, log.Object may result in "fatal error: concurrent map iteration and map write". +// Because span is sent asynchronously, it is possible that this map will also be modified. func Object(key string, obj interface{}) Field { return Field{ key: key, @@ -140,6 +143,16 @@ func Object(key string, obj interface{}) Field { } } +// Event creates a string-valued Field for span logs with key="event" and value=val. +func Event(val string) Field { + return String("event", val) +} + +// Message creates a string-valued Field for span logs with key="message" and value=val. +func Message(val string) Field { + return String("message", val) +} + // LazyLogger allows for user-defined, late-bound logging of arbitrary data type LazyLogger func(fv Encoder) diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go index 3832feb5ceb297f01e6fda2e2b13224dcea0aedc..d57e28aa57f36379d1efc21dadfbb5b633697eeb 100644 --- a/vendor/github.com/opentracing/opentracing-go/log/util.go +++ b/vendor/github.com/opentracing/opentracing-go/log/util.go @@ -1,6 +1,9 @@ package log -import "fmt" +import ( + "fmt" + "reflect" +) // InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice // a la Span.LogFields(). 
@@ -46,6 +49,10 @@ func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { case float64: fields[i] = Float64(key, typedVal) default: + if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) { + fields[i] = String(key, "nil") + continue + } // When in doubt, coerce to a string fields[i] = String(key, fmt.Sprint(typedVal)) } diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go index 0d32f692c4104649aa3f5b9b40b0d59974fb2a95..f9b680a213de908ccc3a3a67d69eb09ca44ead1a 100644 --- a/vendor/github.com/opentracing/opentracing-go/noop.go +++ b/vendor/github.com/opentracing/opentracing-go/noop.go @@ -21,9 +21,9 @@ type noopSpan struct{} type noopSpanContext struct{} var ( - defaultNoopSpanContext = noopSpanContext{} - defaultNoopSpan = noopSpan{} - defaultNoopTracer = NoopTracer{} + defaultNoopSpanContext SpanContext = noopSpanContext{} + defaultNoopSpan Span = noopSpan{} + defaultNoopTracer Tracer = NoopTracer{} ) const ( @@ -35,7 +35,7 @@ func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} // noopSpan: func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } -func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan } +func (n noopSpan) SetBaggageItem(key, val string) Span { return n } func (n noopSpan) BaggageItem(key string) string { return emptyString } func (n noopSpan) SetTag(key string, value interface{}) Span { return n } func (n noopSpan) LogFields(fields ...log.Field) {} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go index 0dd466a373efa662cf92b72f90249f4deb682afe..b0c275eb05e4d5fb457f9fc723d83e502f5d0979 100644 --- a/vendor/github.com/opentracing/opentracing-go/propagation.go +++ b/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -160,7 +160,7 @@ type HTTPHeadersCarrier http.Header // Set conforms to the TextMapWriter interface. func (c HTTPHeadersCarrier) Set(key, val string) { h := http.Header(c) - h.Add(key, val) + h.Set(key, val) } // ForeachKey conforms to the TextMapReader interface. diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go index f6c3234accf672e83929fb0f89186638137cb485..0d3fb53418382d1c05b27ed16ce540b469303a9c 100644 --- a/vendor/github.com/opentracing/opentracing-go/span.go +++ b/vendor/github.com/opentracing/opentracing-go/span.go @@ -41,6 +41,8 @@ type Span interface { Context() SpanContext // Sets or changes the operation name. + // + // Returns a reference to this Span for chaining. SetOperationName(operationName string) Span // Adds a tag to the span. @@ -51,6 +53,8 @@ type Span interface { // other tag value types is undefined at the OpenTracing level. If a // tracing system does not know how to handle a particular value type, it // may ignore the tag, but shall not panic. + // + // Returns a reference to this Span for chaining. 
SetTag(key string, value interface{}) Span // LogFields is an efficient and type-checked way to record key:value diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go index 7bca1f73676d9bd25e2298a8cce425a3bfa5a42e..715f0cedfb60ef71a4fea034e947fe2d50bf5c45 100644 --- a/vendor/github.com/opentracing/opentracing-go/tracer.go +++ b/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -44,8 +44,7 @@ type Tracer interface { // and each has an expected carrier type. // // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see - // https://godoc.org/golang.org/x/net/context#WithValue). + // used by `context.Context` (see https://godoc.org/context#WithValue). // // Example usage (sans error handling): // diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go index cdd80ec9a73e49cbd7960d5fb31044a4758fe915..a3ba09881d59fbf8f80a3ac80c3a818c835934b8 100644 --- a/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go +++ b/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go @@ -5,16 +5,18 @@ package fsutil import ( "os" "time" + + "github.com/pkg/errors" ) func chtimes(path string, un int64) error { mtime := time.Unix(0, un) fi, err := os.Lstat(path) if err != nil { - return err + return errors.WithStack(err) } if fi.Mode()&os.ModeSymlink != 0 { return nil } - return os.Chtimes(path, mtime, mtime) + return errors.WithStack(os.Chtimes(path, mtime, mtime)) } diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy.go b/vendor/github.com/tonistiigi/fsutil/copy/copy.go index 27b7d56564786efa1553375c57898a89b86027f5..02b3dc9ef1f1d34dd4acbb82ac8c7135e2d74deb 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy.go @@ -147,7 +147,7 @@ func (c *copier) prepareTargetDir(srcFollowed, src, destPath string, copyDirCont } type User struct { - Uid, Gid int + UID, GID int } type Chowner func(*User) (*User, error) @@ -175,7 +175,7 @@ func WithCopyInfo(ci CopyInfo) func(*CopyInfo) { func WithChown(uid, gid int) Opt { return func(ci *CopyInfo) { ci.Chown = func(*User) (*User, error) { - return &User{Uid: uid, Gid: gid}, nil + return &User{UID: uid, GID: gid}, nil } } } diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go index 268a9fc47e48e22043da23a5394985dd69ead673..4e6b93c1663deec75d89da457e708274507f8353 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go @@ -10,7 +10,7 @@ import ( "golang.org/x/sys/unix" ) -func getUidGid(fi os.FileInfo) (uid, gid int) { +func getUIDGID(fi os.FileInfo) (uid, gid int) { st := fi.Sys().(*syscall.Stat_t) return int(st.Uid), int(st.Gid) } @@ -19,8 +19,8 @@ func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { st := fi.Sys().(*syscall.Stat_t) chown := c.chown - uid, gid := getUidGid(fi) - old := &User{Uid: uid, Gid: gid} + uid, gid := getUIDGID(fi) + old := &User{UID: uid, GID: gid} if chown == nil { chown = func(u *User) (*User, error) { return u, nil diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go index e80ee7892b2fc3b6dff013f69218142b127be5d9..6debee281d0e8cdabf5f00967bdc139a914c75fb 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go +++ 
b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go @@ -10,7 +10,7 @@ import ( "golang.org/x/sys/unix" ) -func getUidGid(fi os.FileInfo) (uid, gid int) { +func getUIDGID(fi os.FileInfo) (uid, gid int) { st := fi.Sys().(*syscall.Stat_t) return int(st.Uid), int(st.Gid) } @@ -18,8 +18,8 @@ func getUidGid(fi os.FileInfo) (uid, gid int) { func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { st := fi.Sys().(*syscall.Stat_t) chown := c.chown - uid, gid := getUidGid(fi) - old := &User{Uid: uid, Gid: gid} + uid, gid := getUIDGID(fi) + old := &User{UID: uid, GID: gid} if chown == nil { chown = func(u *User) (*User, error) { return u, nil diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go index b5eeb90d3646fa9a6167517aed01ff1388555310..98547544759b6705d1cce90b625e04ff60b71456 100644 --- a/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go @@ -17,7 +17,7 @@ func Chown(p string, old *User, fn Chowner) error { return errors.WithStack(err) } if user != nil { - if err := os.Lchown(p, user.Uid, user.Gid); err != nil { + if err := os.Lchown(p, user.UID, user.GID); err != nil { return err } } diff --git a/vendor/github.com/tonistiigi/fsutil/diff.go b/vendor/github.com/tonistiigi/fsutil/diff.go index 1cbc32b30669f63f5468d6f737774643fb47d5bd..a7405dc5332c3490dfa09ffd409c47e56cb6f970 100644 --- a/vendor/github.com/tonistiigi/fsutil/diff.go +++ b/vendor/github.com/tonistiigi/fsutil/diff.go @@ -19,9 +19,9 @@ type HandleChangeFn func(ChangeKind, string, os.FileInfo, error) error type ContentHasher func(*types.Stat) (hash.Hash, error) -func GetWalkerFn(root string) walkerFn { +func getWalkerFn(root string) walkerFn { return func(ctx context.Context, pathC chan<- *currentPath) error { - return Walk(ctx, root, nil, func(path string, f os.FileInfo, err error) error { + return errors.Wrap(Walk(ctx, root, nil, func(path string, f os.FileInfo, err error) error { if err != nil { return err } @@ -42,7 +42,7 @@ func GetWalkerFn(root string) walkerFn { case pathC <- p: return nil } - }) + }), "failed to walk") } } diff --git a/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go b/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go deleted file mode 100644 index 4ac7ec5ed7057b73ef6891ecc1ae97db43953deb..0000000000000000000000000000000000000000 --- a/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go +++ /dev/null @@ -1,37 +0,0 @@ -package fsutil - -import ( - "bytes" - "syscall" - - "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" -) - -// compareSysStat returns whether the stats are equivalent, -// whether the files are considered the same file, and -// an error -func compareSysStat(s1, s2 interface{}) (bool, error) { - ls1, ok := s1.(*syscall.Stat_t) - if !ok { - return false, nil - } - ls2, ok := s2.(*syscall.Stat_t) - if !ok { - return false, nil - } - - return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil -} - -func compareCapabilities(p1, p2 string) (bool, error) { - c1, err := sysx.LGetxattr(p1, "security.capability") - if err != nil && err != syscall.ENODATA { - return false, errors.Wrapf(err, "failed to get xattr for %s", p1) - } - c2, err := sysx.LGetxattr(p2, "security.capability") - if err != nil && err != syscall.ENODATA { - return false, errors.Wrapf(err, "failed to get xattr for %s", p2) - } - return bytes.Equal(c1, c2), nil -} diff --git 
a/vendor/github.com/tonistiigi/fsutil/diskwriter.go b/vendor/github.com/tonistiigi/fsutil/diskwriter.go index 70323c88c9f38a57278b2e4b02d90a27475e4a60..786432264f0f1268434430ebe128eabc0825d233 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter.go @@ -8,6 +8,7 @@ import ( "path/filepath" "strconv" "sync" + "syscall" "time" "github.com/opencontainers/go-digest" @@ -32,7 +33,6 @@ type DiskWriter struct { opt DiskWriterOpt dest string - wg sync.WaitGroup ctx context.Context cancel func() eg *errgroup.Group @@ -104,7 +104,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er stat, ok := fi.Sys().(*types.Stat) if !ok { - return errors.Errorf("%s invalid change without stat information", p) + return errors.WithStack(&os.PathError{Path: p, Err: syscall.EBADMSG, Op: "change without stat info"}) } statCopy := *stat @@ -118,13 +118,13 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er rename := true oldFi, err := os.Lstat(destPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { if kind != ChangeKindAdd { - return errors.Wrapf(err, "invalid addition: %s", destPath) + return errors.Wrap(err, "modify/rm") } rename = false } else { - return errors.Wrapf(err, "failed to stat %s", destPath) + return errors.WithStack(err) } } @@ -285,7 +285,6 @@ func (hw *hashedWriter) Digest() digest.Digest { type lazyFileWriter struct { dest string - ctx context.Context f *os.File fileMode *os.FileMode } diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go index ff0a22e3caf8e7afd77609cdb361dee490600410..aa2d298f40ea133eff21cf7784daa13f016f3951 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go @@ -17,17 +17,17 @@ func rewriteMetadata(p string, stat *types.Stat) error { } if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil { - return errors.Wrapf(err, "failed to lchown %s", p) + return errors.WithStack(err) } if os.FileMode(stat.Mode)&os.ModeSymlink == 0 { if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil { - return errors.Wrapf(err, "failed to chown %s", p) + return errors.WithStack(err) } } if err := chtimes(p, stat.ModTime); err != nil { - return errors.Wrapf(err, "failed to chtimes %s", p) + return err } return nil @@ -46,7 +46,7 @@ func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error { } if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil { - return err + return errors.WithStack(err) } return nil } diff --git a/vendor/github.com/tonistiigi/fsutil/followlinks.go b/vendor/github.com/tonistiigi/fsutil/followlinks.go index ed4af6e8165651db43812986b47dcee99b438d5c..a0942413e8112d0796d1f8a0744bc0f2ae2ed08a 100644 --- a/vendor/github.com/tonistiigi/fsutil/followlinks.go +++ b/vendor/github.com/tonistiigi/fsutil/followlinks.go @@ -77,10 +77,10 @@ func (r *symlinkResolver) readSymlink(p string, allowWildcard bool) ([]string, e if allowWildcard && containsWildcards(base) { fis, err := ioutil.ReadDir(filepath.Dir(realPath)) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return nil, nil } - return nil, errors.Wrapf(err, "failed to read dir %s", filepath.Dir(realPath)) + return nil, errors.Wrap(err, "readdir") } var out []string for _, f := range fis { @@ -97,17 +97,17 @@ func (r *symlinkResolver) readSymlink(p 
string, allowWildcard bool) ([]string, e fi, err := os.Lstat(realPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, os.ErrNotExist) { return nil, nil } - return nil, errors.Wrapf(err, "failed to lstat %s", realPath) + return nil, errors.WithStack(err) } if fi.Mode()&os.ModeSymlink == 0 { return nil, nil } link, err := os.Readlink(realPath) if err != nil { - return nil, errors.Wrapf(err, "failed to readlink %s", realPath) + return nil, errors.WithStack(err) } link = filepath.Clean(link) if filepath.IsAbs(link) { diff --git a/vendor/github.com/tonistiigi/fsutil/fs.go b/vendor/github.com/tonistiigi/fsutil/fs.go index a9467e94027ddd55df2f2988be97db337b4418a7..e26110b320b37847b4d4e2dbc8640bd2fabeaf69 100644 --- a/vendor/github.com/tonistiigi/fsutil/fs.go +++ b/vendor/github.com/tonistiigi/fsutil/fs.go @@ -9,6 +9,7 @@ import ( "path/filepath" "sort" "strings" + "syscall" "github.com/pkg/errors" "github.com/tonistiigi/fsutil/types" @@ -36,7 +37,8 @@ func (fs *fs) Walk(ctx context.Context, fn filepath.WalkFunc) error { } func (fs *fs) Open(p string) (io.ReadCloser, error) { - return os.Open(filepath.Join(fs.root, p)) + rc, err := os.Open(filepath.Join(fs.root, p)) + return rc, errors.WithStack(err) } type Dir struct { @@ -51,10 +53,10 @@ func SubDirFS(dirs []Dir) (FS, error) { m := map[string]Dir{} for _, d := range dirs { if path.Base(d.Stat.Path) != d.Stat.Path { - return nil, errors.Errorf("subdir %s must be single file", d.Stat.Path) + return nil, errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.EISDIR, Op: "invalid path"}) } if _, ok := m[d.Stat.Path]; ok { - return nil, errors.Errorf("invalid path %s", d.Stat.Path) + return nil, errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.EEXIST, Op: "duplicate path"}) } m[d.Stat.Path] = d } @@ -70,7 +72,7 @@ func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error { for _, d := range fs.dirs { fi := &StatInfo{Stat: &d.Stat} if !fi.IsDir() { - return errors.Errorf("fs subdir %s not mode directory", d.Stat.Path) + return errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.ENOTDIR, Op: "walk subdir"}) } if err := fn(d.Stat.Path, fi, nil); err != nil { return err @@ -78,7 +80,7 @@ func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error { if err := d.FS.Walk(ctx, func(p string, fi os.FileInfo, err error) error { stat, ok := fi.Sys().(*types.Stat) if !ok { - return errors.Wrapf(err, "invalid fileinfo without stat info: %s", p) + return errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.EBADMSG, Op: "fileinfo without stat info"}) } stat.Path = path.Join(d.Stat.Path, stat.Path) if stat.Linkname != "" { @@ -105,7 +107,7 @@ func (fs *subDirFS) Open(p string) (io.ReadCloser, error) { } d, ok := fs.m[parts[0]] if !ok { - return nil, os.ErrNotExist + return nil, errors.WithStack(&os.PathError{Path: parts[0], Err: syscall.ENOENT, Op: "open"}) } return d.FS.Open(parts[1]) } diff --git a/vendor/github.com/tonistiigi/fsutil/go.mod b/vendor/github.com/tonistiigi/fsutil/go.mod index ed41f5301a0be02c8e01e6f0892aa2c7625800e1..d95432cd1dcf9fe866029da47187bf017586d88c 100644 --- a/vendor/github.com/tonistiigi/fsutil/go.mod +++ b/vendor/github.com/tonistiigi/fsutil/go.mod @@ -5,14 +5,10 @@ go 1.13 require ( github.com/Microsoft/hcsshim v0.8.9 // indirect github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc - github.com/davecgh/go-spew v1.1.1 // indirect github.com/docker/docker v0.0.0-20200511152416-a93e9eb0e95c github.com/gogo/protobuf v1.3.1 - 
github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect github.com/moby/sys/mount v0.1.0 // indirect github.com/moby/sys/mountinfo v0.1.3 // indirect - github.com/onsi/ginkgo v1.7.0 // indirect - github.com/onsi/gomega v1.4.3 // indirect github.com/opencontainers/go-digest v1.0.0-rc1 github.com/opencontainers/image-spec v1.0.1 // indirect github.com/opencontainers/runc v1.0.0-rc10 // indirect @@ -20,6 +16,5 @@ require ( github.com/stretchr/testify v1.5.1 golang.org/x/sync v0.0.0-20190423024810-112230192c58 golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae - gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect - gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect + gotest.tools/v3 v3.0.2 // indirect ) diff --git a/vendor/github.com/tonistiigi/fsutil/hardlinks.go b/vendor/github.com/tonistiigi/fsutil/hardlinks.go index d977f0d6bbf3e52e8920e8248294bd3fb4510d1c..ef8bbfb5daff76478c9a61154b2c56917048ffd8 100644 --- a/vendor/github.com/tonistiigi/fsutil/hardlinks.go +++ b/vendor/github.com/tonistiigi/fsutil/hardlinks.go @@ -2,6 +2,7 @@ package fsutil import ( "os" + "syscall" "github.com/pkg/errors" "github.com/tonistiigi/fsutil/types" @@ -28,7 +29,7 @@ func (v *Hardlinks) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err stat, ok := fi.Sys().(*types.Stat) if !ok { - return errors.Errorf("invalid change without stat info: %s", p) + return errors.WithStack(&os.PathError{Path: p, Err: syscall.EBADMSG, Op: "change without stat info"}) } if fi.IsDir() || fi.Mode()&os.ModeSymlink != 0 { diff --git a/vendor/github.com/tonistiigi/fsutil/receive.go b/vendor/github.com/tonistiigi/fsutil/receive.go index 0210dcdb1add95a4e8e2fb9e61a2f5ce3a4758cb..5c6a486978be3183846911472c9cf15882fbecb0 100644 --- a/vendor/github.com/tonistiigi/fsutil/receive.go +++ b/vendor/github.com/tonistiigi/fsutil/receive.go @@ -20,7 +20,7 @@ type ReceiveOpt struct { } func Receive(ctx context.Context, conn Stream, dest string, opt ReceiveOpt) error { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(ctx) defer cancel() r := &receiver{ @@ -105,7 +105,6 @@ func (w *dynamicWalker) fill(ctx context.Context, pathC chan<- *currentPath) err return ctx.Err() } } - return nil } func (r *receiver) run(ctx context.Context) error { @@ -131,7 +130,7 @@ func (r *receiver) run(ctx context.Context) error { }() destWalker := emptyWalker if !r.merge { - destWalker = GetWalkerFn(r.dest) + destWalker = getWalkerFn(r.dest) } err := doubleWalkDiff(ctx, dw.HandleChange, destWalker, w.fill, r.filter) if err != nil { diff --git a/vendor/github.com/tonistiigi/fsutil/send.go b/vendor/github.com/tonistiigi/fsutil/send.go index e7c5a37d1b172e3bffa90bb396f8163c6325c1fb..2c1a3801d58a2a46cfc050600eb63165e3d59096 100644 --- a/vendor/github.com/tonistiigi/fsutil/send.go +++ b/vendor/github.com/tonistiigi/fsutil/send.go @@ -5,6 +5,7 @@ import ( "io" "os" "sync" + "syscall" "github.com/pkg/errors" "github.com/tonistiigi/fsutil/types" @@ -13,7 +14,8 @@ import ( var bufPool = sync.Pool{ New: func() interface{} { - return make([]byte, 32*1<<10) + buf := make([]byte, 32*1<<10) + return &buf }, } @@ -131,9 +133,9 @@ func (s *sender) sendFile(h *sendHandle) error { f, err := s.fs.Open(h.path) if err == nil { defer f.Close() - buf := bufPool.Get().([]byte) + buf := bufPool.Get().(*[]byte) defer bufPool.Put(buf) - if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, f, buf); err != nil { + if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, f, *buf); err != nil { return err } } @@ 
-148,7 +150,7 @@ func (s *sender) walk(ctx context.Context) error { } stat, ok := fi.Sys().(*types.Stat) if !ok { - return errors.Wrapf(err, "invalid fileinfo without stat info: %s", path) + return errors.WithStack(&os.PathError{Path: path, Err: syscall.EBADMSG, Op: "fileinfo without stat info"}) } p := &types.Packet{ diff --git a/vendor/github.com/tonistiigi/fsutil/stat.go b/vendor/github.com/tonistiigi/fsutil/stat.go index 789dce3dbfb4ed171986d071248a4be76e315d09..2ab8da118e2cfda04721ccdda155b57296da4d39 100644 --- a/vendor/github.com/tonistiigi/fsutil/stat.go +++ b/vendor/github.com/tonistiigi/fsutil/stat.go @@ -31,13 +31,13 @@ func mkstat(path, relpath string, fi os.FileInfo, inodemap map[uint64]string) (* if fi.Mode()&os.ModeSymlink != 0 { link, err := os.Readlink(path) if err != nil { - return nil, errors.Wrapf(err, "failed to readlink %s", path) + return nil, errors.WithStack(err) } stat.Linkname = link } } if err := loadXattr(path, stat); err != nil { - return nil, errors.Wrapf(err, "failed to xattr %s", relpath) + return nil, err } if runtime.GOOS == "windows" { @@ -58,7 +58,7 @@ func mkstat(path, relpath string, fi os.FileInfo, inodemap map[uint64]string) (* func Stat(path string) (*types.Stat, error) { fi, err := os.Lstat(path) if err != nil { - return nil, errors.Wrap(err, "os stat") + return nil, errors.WithStack(err) } return mkstat(path, filepath.Base(path), fi, nil) } diff --git a/vendor/github.com/tonistiigi/fsutil/tarwriter.go b/vendor/github.com/tonistiigi/fsutil/tarwriter.go index 06f28c55ffaae548cb653c08a0b41c1e8b4a625a..bd46a2250ff847ba7b42d4751420860079e5b328 100644 --- a/vendor/github.com/tonistiigi/fsutil/tarwriter.go +++ b/vendor/github.com/tonistiigi/fsutil/tarwriter.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "strings" + "syscall" "github.com/pkg/errors" "github.com/tonistiigi/fsutil/types" @@ -15,9 +16,12 @@ import ( func WriteTar(ctx context.Context, fs FS, w io.Writer) error { tw := tar.NewWriter(w) err := fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error { + if err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } stat, ok := fi.Sys().(*types.Stat) if !ok { - return errors.Wrapf(err, "invalid fileinfo without stat info: %s", path) + return errors.WithStack(&os.PathError{Path: path, Err: syscall.EBADMSG, Op: "fileinfo without stat info"}) } hdr, err := tar.FileInfoHeader(fi, stat.Linkname) if err != nil { @@ -37,7 +41,7 @@ func WriteTar(ctx context.Context, fs FS, w io.Writer) error { hdr.Linkname = stat.Linkname if hdr.Linkname != "" { hdr.Size = 0 - if fi.Mode() & os.ModeSymlink != 0 { + if fi.Mode()&os.ModeSymlink != 0 { hdr.Typeflag = tar.TypeSymlink } else { hdr.Typeflag = tar.TypeLink @@ -52,7 +56,7 @@ func WriteTar(ctx context.Context, fs FS, w io.Writer) error { } if err := tw.WriteHeader(hdr); err != nil { - return errors.Wrap(err, "failed to write file header") + return errors.Wrapf(err, "failed to write file header %s", name) } if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 && hdr.Linkname == "" { @@ -61,10 +65,10 @@ func WriteTar(ctx context.Context, fs FS, w io.Writer) error { return err } if _, err := io.Copy(tw, rc); err != nil { - return err + return errors.WithStack(err) } if err := rc.Close(); err != nil { - return err + return errors.WithStack(err) } } return nil diff --git a/vendor/github.com/tonistiigi/fsutil/validator.go b/vendor/github.com/tonistiigi/fsutil/validator.go index 2bd1287a8535f50fb16907bf5e72efda663b4377..9bd7d94d36438a204c3edf191d5226eab51fec19 100644 --- 
a/vendor/github.com/tonistiigi/fsutil/validator.go +++ b/vendor/github.com/tonistiigi/fsutil/validator.go @@ -6,6 +6,7 @@ import ( "runtime" "sort" "strings" + "syscall" "github.com/pkg/errors" ) @@ -31,10 +32,10 @@ func (v *Validator) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err p = strings.Replace(p, "\\", "", -1) } if p != path.Clean(p) { - return errors.Errorf("invalid unclean path %s", p) + return errors.WithStack(&os.PathError{Path: p, Err: syscall.EINVAL, Op: "unclean path"}) } if path.IsAbs(p) { - return errors.Errorf("abolute path %s not allowed", p) + return errors.WithStack(&os.PathError{Path: p, Err: syscall.EINVAL, Op: "absolute path"}) } dir := path.Dir(p) base := path.Base(p) @@ -42,7 +43,7 @@ func (v *Validator) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err dir = "" } if dir == ".." || strings.HasPrefix(p, "../") { - return errors.Errorf("invalid path: %s", p) + return errors.WithStack(&os.PathError{Path: p, Err: syscall.EINVAL, Op: "escape check"}) } // find a parent dir from saved records diff --git a/vendor/github.com/tonistiigi/fsutil/walker.go b/vendor/github.com/tonistiigi/fsutil/walker.go index 6004b888508e264df973ee361caedcf010fe8aec..b10383e4c5ab54878983573c962bb026eed996c0 100644 --- a/vendor/github.com/tonistiigi/fsutil/walker.go +++ b/vendor/github.com/tonistiigi/fsutil/walker.go @@ -25,21 +25,21 @@ type WalkOpt struct { func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error { root, err := filepath.EvalSymlinks(p) if err != nil { - return errors.Wrapf(err, "failed to resolve %s", root) + return errors.WithStack(&os.PathError{Op: "resolve", Path: root, Err: err}) } fi, err := os.Stat(root) if err != nil { - return errors.Wrapf(err, "failed to stat: %s", root) + return errors.WithStack(err) } if !fi.IsDir() { - return errors.Errorf("%s is not a directory", root) + return errors.WithStack(&os.PathError{Op: "walk", Path: root, Err: syscall.ENOTDIR}) } var pm *fileutils.PatternMatcher if opt != nil && opt.ExcludePatterns != nil { pm, err = fileutils.NewPatternMatcher(opt.ExcludePatterns) if err != nil { - return errors.Wrapf(err, "invalid excludepaths %s", opt.ExcludePatterns) + return errors.Wrapf(err, "invalid excludepatterns: %s", opt.ExcludePatterns) } } @@ -65,17 +65,15 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err seenFiles := make(map[uint64]string) return filepath.Walk(root, func(path string, fi os.FileInfo, err error) (retErr error) { - if err != nil { - if os.IsNotExist(err) { - return filepath.SkipDir - } - return err - } defer func() { if retErr != nil && isNotExist(retErr) { retErr = filepath.SkipDir } }() + if err != nil { + return err + } + origpath := path path, err = filepath.Rel(root, path) if err != nil { diff --git a/vendor/github.com/tonistiigi/units/LICENSE b/vendor/github.com/tonistiigi/units/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5c1095df0d9da41c5741329ac75fd9c22939bc08 --- /dev/null +++ b/vendor/github.com/tonistiigi/units/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Tõnis Tiigi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the 
following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/tonistiigi/units/bytes.go b/vendor/github.com/tonistiigi/units/bytes.go new file mode 100644 index 0000000000000000000000000000000000000000..5a82fc1b349b95d0124bedc5e7f58d638583ac61 --- /dev/null +++ b/vendor/github.com/tonistiigi/units/bytes.go @@ -0,0 +1,125 @@ +/* + Simple byte size formatting. + + This package implements types that can be used in stdlib formatting functions + like `fmt.Printf` to control the output of the expected printed string. + + + Floating point flags %f and %g print the value in using the correct unit + suffix. Decimal units are default, # switches to binary units. If a value is + best represented as full bytes, integer bytes are printed instead. + + Examples: + fmt.Printf("%.2f", 123 * B) => "123B" + fmt.Printf("%.2f", 1234 * B) => "1.23kB" + fmt.Printf("%g", 1200 * B) => "1.2kB" + fmt.Printf("%#g", 1024 * B) => "1KiB" + + + Integer flag %d always prints the value in bytes. # flag adds an unit prefix. + + Examples: + fmt.Printf("%d", 1234 * B) => "1234" + fmt.Printf("%#d", 1234 * B) => "1234B" + + %v is equal to %g + +*/ +package units + +import ( + "fmt" + "io" + "math" + "math/big" +) + +type Bytes int64 + +const ( + B Bytes = 1 << (10 * iota) + KiB + MiB + GiB + TiB + PiB + EiB + + KB = 1e3 * B + MB = 1e3 * KB + GB = 1e3 * MB + TB = 1e3 * GB + PB = 1e3 * TB + EB = 1e3 * PB +) + +var units = map[bool][]string{ + false: []string{ + "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", + }, + true: []string{ + "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", + }, +} + +func (b Bytes) Format(f fmt.State, c rune) { + switch c { + case 'f', 'g': + fv, unit, ok := b.floatValue(f.Flag('#')) + if !ok { + b.formatInt(&noPrecision{f}, 'd', true) + return + } + big.NewFloat(fv).Format(f, c) + io.WriteString(f, unit) + case 'd': + b.formatInt(f, c, f.Flag('#')) + default: + if f.Flag('#') { + fmt.Fprintf(f, "bytes(%d)", int64(b)) + } else { + fmt.Fprintf(f, "%g", b) + } + } +} + +func (b Bytes) formatInt(f fmt.State, c rune, withUnit bool) { + big.NewInt(int64(b)).Format(f, c) + if withUnit { + io.WriteString(f, "B") + } +} + +func (b Bytes) floatValue(binary bool) (float64, string, bool) { + i := 0 + var baseUnit Bytes = 1 + if b < 0 { + baseUnit *= -1 + } + for { + next := baseUnit + if binary { + next *= 1 << 10 + } else { + next *= 1e3 + } + if (baseUnit > 0 && b >= next) || (baseUnit < 0 && b <= next) { + i++ + baseUnit = next + continue + } + if i == 0 { + return 0, "", false + } + + return float64(b) / math.Abs(float64(baseUnit)), units[binary][i], true + } +} + +type noPrecision struct { + fmt.State +} + +func (*noPrecision) Precision() (prec int, ok bool) { + return 0, false +} diff --git a/vendor/github.com/tonistiigi/units/readme.md b/vendor/github.com/tonistiigi/units/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..5c67d30d43d18d5ed471721a2212ada29931375c --- /dev/null +++ 
b/vendor/github.com/tonistiigi/units/readme.md @@ -0,0 +1,29 @@ +#### Simple byte size formatting. + +This package implements types that can be used in stdlib formatting functions +like `fmt.Printf` to control the output of the expected printed string. + +Floating point flags `%f` and %g print the value in using the correct unit +suffix. Decimal units are default, `#` switches to binary units. If a value is +best represented as full bytes, integer bytes are printed instead. + +##### Examples: + +``` +fmt.Printf("%.2f", 123 * B) => "123B" +fmt.Printf("%.2f", 1234 * B) => "1.23kB" +fmt.Printf("%g", 1200 * B) => "1.2kB" +fmt.Printf("%#g", 1024 * B) => "1KiB" +``` + + +Integer flag `%d` always prints the value in bytes. `#` flag adds an unit prefix. + +##### Examples: + +``` +fmt.Printf("%d", 1234 * B) => "1234" +fmt.Printf("%#d", 1234 * B) => "1234B" +``` + +`%v` is equal to `%g` \ No newline at end of file diff --git a/vendor/golang.org/x/crypto/nacl/sign/sign.go b/vendor/golang.org/x/crypto/nacl/sign/sign.go new file mode 100644 index 0000000000000000000000000000000000000000..d07627019ef5acbeac37071356d4f819654d1096 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/sign/sign.go @@ -0,0 +1,90 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sign signs small messages using public-key cryptography. +// +// Sign uses Ed25519 to sign messages. The length of messages is not hidden. +// Messages should be small because: +// 1. The whole message needs to be held in memory to be processed. +// 2. Using large messages pressures implementations on small machines to process +// plaintext without verifying the signature. This is very dangerous, and this API +// discourages it, but a protocol that uses excessive message sizes might present +// some implementations with no other choice. +// 3. Performance may be improved by working with messages that fit into data caches. +// Thus large amounts of data should be chunked so that each message is small. +// +// This package is not interoperable with the current release of NaCl +// (https://nacl.cr.yp.to/sign.html), which does not support Ed25519 yet. However, +// it is compatible with the NaCl fork libsodium (https://www.libsodium.org), as well +// as TweetNaCl (https://tweetnacl.cr.yp.to/). +package sign + +import ( + "io" + + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/internal/subtle" +) + +// Overhead is the number of bytes of overhead when signing a message. +const Overhead = 64 + +// GenerateKey generates a new public/private key pair suitable for use with +// Sign and Open. +func GenerateKey(rand io.Reader) (publicKey *[32]byte, privateKey *[64]byte, err error) { + pub, priv, err := ed25519.GenerateKey(rand) + if err != nil { + return nil, nil, err + } + publicKey, privateKey = new([32]byte), new([64]byte) + copy((*publicKey)[:], pub) + copy((*privateKey)[:], priv) + return publicKey, privateKey, nil +} + +// Sign appends a signed copy of message to out, which will be Overhead bytes +// longer than the original and must not overlap it. 
+func Sign(out, message []byte, privateKey *[64]byte) []byte { + sig := ed25519.Sign(ed25519.PrivateKey((*privateKey)[:]), message) + ret, out := sliceForAppend(out, Overhead+len(message)) + if subtle.AnyOverlap(out, message) { + panic("nacl: invalid buffer overlap") + } + copy(out, sig) + copy(out[Overhead:], message) + return ret +} + +// Open verifies a signed message produced by Sign and appends the message to +// out, which must not overlap the signed message. The output will be Overhead +// bytes smaller than the signed message. +func Open(out, signedMessage []byte, publicKey *[32]byte) ([]byte, bool) { + if len(signedMessage) < Overhead { + return nil, false + } + if !ed25519.Verify(ed25519.PublicKey((*publicKey)[:]), signedMessage[Overhead:], signedMessage[:Overhead]) { + return nil, false + } + ret, out := sliceForAppend(out, len(signedMessage)-Overhead) + if subtle.AnyOverlap(out, signedMessage) { + panic("nacl: invalid buffer overlap") + } + copy(out, signedMessage[Overhead:]) + return ret, true +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +}
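The sign.go file above brings the full golang.org/x/crypto/nacl/sign package into the vendor tree. As a quick illustration only (not part of this diff), the GenerateKey/Sign/Open API it exposes could be exercised roughly as follows; the message contents and variable names here are made up for the example:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/sign"
)

func main() {
	// GenerateKey returns a 32-byte public key and a 64-byte private key.
	pub, priv, err := sign.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Sign prepends the 64-byte (Overhead) signature to the message and
	// appends the result to the first argument (nil here allocates a new slice).
	signed := sign.Sign(nil, []byte("hello"), priv)

	// Open verifies the signature and returns the message without the
	// Overhead bytes; ok is false if verification fails.
	msg, ok := sign.Open(nil, signed, pub)
	fmt.Println(ok, string(msg)) // true hello
}
```

The signed slice is always Overhead (64) bytes longer than the input message, matching the documentation in the vendored file.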